forked from cybercinch/openaccounting-server
refactor-upgrade-to-gorm #5
.dockerignore (Normal file, 39 lines)
@@ -0,0 +1,39 @@
# Git
.git
.gitignore

# Documentation
README.md
*.md

# Docker
Dockerfile
.dockerignore

# Build artifacts
server
*.exe

# Development files
.vscode/
.idea/

# Local config and data
config.json
*.db
data/

# Test files
*_test.go
test*

# Temporary files
*.tmp
*.log

# OS files
.DS_Store
Thumbs.db

# Dependencies (will be downloaded)
vendor/
Dockerfile (Normal file, 64 lines)
@@ -0,0 +1,64 @@
# Build stage
FROM golang:1.24-alpine AS builder

# Install build dependencies for CGO (needed for SQLite)
RUN apk add --no-cache git gcc musl-dev

# Set working directory
WORKDIR /app

# Copy go mod files
COPY go.mod go.sum ./

# Download dependencies
RUN go mod download

# Copy source code
COPY . .

# Build the application
RUN CGO_ENABLED=1 GOOS=linux go build -a -installsuffix cgo -o server ./core/

# Final stage
FROM alpine:latest

# Install ca-certificates for HTTPS and sqlite for database
RUN apk --no-cache add ca-certificates sqlite

# Create app user for security
RUN adduser -D -s /bin/sh appuser

# Set working directory
WORKDIR /app

# Copy binary from builder stage
COPY --from=builder /app/server .

# Create data directory for SQLite
RUN mkdir -p /app/data && chown appuser:appuser /app/data

# Copy config sample (optional)
COPY config.json.sample .

# Change ownership to app user
RUN chown -R appuser:appuser /app

# Switch to non-root user
USER appuser

# Expose port (default 8080, can be overridden with OA_PORT)
EXPOSE 8080

# Health check - requires Accept-Version header
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider --header="Accept-Version: v1" http://localhost:8080/ || exit 1

# Set default environment variables
ENV OA_DATABASE_DRIVER=sqlite \
    OA_DATABASE_FILE=/app/data/openaccounting.db \
    OA_ADDRESS=0.0.0.0 \
    OA_PORT=8080 \
    OA_API_PREFIX=/api/v1

# Run the application
CMD ["./server"]
README.md (391 lines)
@@ -1,30 +1,397 @@
# Open Accounting Server

Open Accounting Server is a modern financial accounting system built with Go, featuring GORM integration, Viper configuration management, and Docker support.

## Features

- **GORM Integration**: Modern ORM with SQLite and MySQL support
- **Viper Configuration**: Flexible config management with environment variables
- **Docker Ready**: Containerized deployment with multi-stage builds
- **SQLite Support**: Easy local development and testing
- **Security**: Environment variable support for sensitive data

## Prerequisites

1. Go 1.8+
2. MySQL 5.7+
- **Go 1.24+** (updated from 1.8+)
- **SQLite** (for development) or **MySQL 5.7+** (for production)
- **Docker** (optional, for containerized deployment)
- **Just** (optional, for build automation)

## Database setup
## Quick Start

Use schema.sql and indexes.sql to create a MySQL database to store Open Accounting data.
### Using Just (Recommended)

```bash
# Setup development environment
just dev-setup

# Run in development mode
just run-dev

# Build and run with Docker
just docker-run
```

### Manual Setup

```bash
# Install dependencies
go mod download

# Run with SQLite (development)
OA_DATABASE_DRIVER=sqlite ./server

# Run with MySQL (production)
OA_DATABASE_DRIVER=mysql OA_PASSWORD=secret ./server
```

## Configuration

Copy config.json.sample to config.json and edit to match your information.
The server now uses **Viper** for advanced configuration management with multiple sources:

## Run
### Configuration Sources (in order of precedence)

`go run core/server.go`
1. **Environment Variables** (highest priority)
2. **Config Files**: `config.json`, `config.yaml`, `config.toml`
3. **Default Values** (lowest priority)

## Build
### Config File Locations

`go build core/server.go`
- `./config.json` (current directory)
- `/etc/openaccounting/config.json`
- `~/.openaccounting/config.json`
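A quick way to confirm the precedence order above: set a value in `config.json`, then override it from the environment (the port numbers here are just illustrative):

```bash
# config.json sets "Port": 8080, but the environment variable takes precedence
OA_PORT=9090 ./server
# the server now listens on 9090 rather than the port from config.json
```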
### Environment Variables

All configuration can be overridden with environment variables using the `OA_` prefix:

| Environment Variable | Config Field | Default | Description |
|----------------------|--------------|---------|-------------|
| `OA_ADDRESS` | Address | `localhost` | Server bind address |
| `OA_PORT` | Port | `8080` | Server port |
| `OA_API_PREFIX` | ApiPrefix | `/api/v1` | API route prefix |
| `OA_DATABASE_DRIVER` | DatabaseDriver | `sqlite` | Database type: `sqlite` or `mysql` |
| `OA_DATABASE_FILE` | DatabaseFile | `./openaccounting.db` | SQLite database file |
| `OA_DATABASE_ADDRESS` | DatabaseAddress | `localhost:3306` | MySQL server address |
| `OA_DATABASE` | Database | | MySQL database name |
| `OA_USER` | User | | Database username |
| `OA_PASSWORD` | Password | | Database password ⚠️ |
| `OA_MAILGUN_DOMAIN` | MailgunDomain | | Mailgun domain |
| `OA_MAILGUN_KEY` | MailgunKey | | Mailgun API key ⚠️ |
| `OA_MAILGUN_EMAIL` | MailgunEmail | | Mailgun email |
| `OA_MAILGUN_SENDER` | MailgunSender | | Mailgun sender name |

⚠️ **Security**: Always use environment variables for sensitive data like passwords and API keys.
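One way to keep these values out of the command line and shell history when running in Docker is an env file passed with `--env-file` (the values below are placeholders):

```bash
# .env holds only the sensitive overrides; do not commit it
cat > .env <<'EOF'
OA_PASSWORD=change-me
OA_MAILGUN_KEY=key-abc123
EOF

docker run --env-file .env -p 8080:8080 openaccounting-server:latest
```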
### Configuration Examples

#### Development (SQLite)
```bash
# Minimal - uses defaults
./server

# Custom database file and port
OA_DATABASE_FILE=./dev.db OA_PORT=9090 ./server
```

#### Production (MySQL)
```bash
# With environment variables (recommended)
export OA_DATABASE_DRIVER=mysql
export OA_DATABASE_ADDRESS=db.example.com:3306
export OA_DATABASE=openaccounting_prod
export OA_USER=openaccounting
export OA_PASSWORD=secure_password
export OA_MAILGUN_KEY=key-abc123
./server

# Or inline
OA_DATABASE_DRIVER=mysql OA_PASSWORD=secret OA_MAILGUN_KEY=key-123 ./server
```

#### Docker
```bash
# SQLite with volume mount
docker run -p 8080:8080 \
  -e OA_DATABASE_DRIVER=sqlite \
  -v ./data:/app/data \
  openaccounting-server:latest

# MySQL with environment variables
docker run -p 8080:8080 \
  -e OA_DATABASE_DRIVER=mysql \
  -e OA_DATABASE_ADDRESS=mysql:3306 \
  -e OA_PASSWORD=secret \
  openaccounting-server:latest
```

## Database Setup

### SQLite (Development)

SQLite databases are created automatically. No manual setup required.

```bash
# Uses ./openaccounting.db by default
OA_DATABASE_DRIVER=sqlite ./server

# Custom location
OA_DATABASE_DRIVER=sqlite OA_DATABASE_FILE=./data/myapp.db ./server
```

### MySQL (Production)

Use the provided schema files to create your MySQL database:

```sql
-- Create database and user
CREATE DATABASE openaccounting;
CREATE USER 'openaccounting'@'%' IDENTIFIED BY 'secure_password';
GRANT ALL PRIVILEGES ON openaccounting.* TO 'openaccounting'@'%';
```

The server will automatically create tables and run migrations on startup.
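A quick way to verify that the automatic migration ran is to list the tables after the first startup, using the credentials created above (adjust the host and database name to your setup):

```bash
# List the tables the server created on first startup
mysql -h localhost -u openaccounting -p openaccounting -e "SHOW TABLES;"
```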
## Building

### Local Build

```bash
# Development build
go build -o server ./core/

# Production build (optimized)
CGO_ENABLED=1 GOOS=linux go build -a -installsuffix cgo -ldflags="-w -s" -o server ./core/
```

### Docker Build

```bash
# Build image
docker build -t openaccounting-server:latest .

# Multi-platform build
docker buildx build --platform linux/amd64,linux/arm64 -t openaccounting-server:latest .
```

## Running

### Development

```bash
# Local with SQLite
just run-dev

# Or manually
OA_DATABASE_DRIVER=sqlite OA_PORT=8080 ./server
```

### Production

```bash
# With Docker Compose (recommended)
docker-compose up -d

# Or manually with environment file
export $(cat .env | xargs)
./server
```

## Just Recipes

This project includes a `justfile` with common tasks:

```bash
just --list        # Show all available recipes
just build         # Build the application
just run-dev       # Run in development mode
just docker-build  # Build Docker image
just docker-run    # Run container
just test          # Run tests
just config-help   # Show configuration help
just dev-setup     # Complete development setup
```

## API

The server provides a REST API at `/api/v1/` (configurable via `OA_API_PREFIX`).

### Health Check

```bash
curl http://localhost:8080/api/v1/health
```

## Development

### Prerequisites

```bash
# Install Go dependencies
go mod download

# Install development tools (optional)
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
```

### Running Tests

```bash
just test
# or
go test ./...
```

### Code Quality

```bash
# Format code
just fmt

# Lint code (requires golangci-lint)
just lint
```

## Docker

If you are interested in running Open Accounting via Docker, @alokmenghrajani has created a [repo](https://github.com/alokmenghrajani/openaccounting-docker) for this.
### Official Images

## Help
Docker images are available with multi-stage builds for optimal size and security:

[Join our Slack chatroom](https://join.slack.com/t/openaccounting/shared_invite/zt-23zy988e8-93HP1GfLDB7osoQ6umpfiA) and talk with us!
- Non-root user for security
- Alpine Linux base for minimal attack surface
- Health checks included
- Volume support for data persistence

### Environment Variables in Docker

```dockerfile
ENV OA_DATABASE_DRIVER=sqlite \
    OA_DATABASE_FILE=/app/data/openaccounting.db \
    OA_ADDRESS=0.0.0.0 \
    OA_PORT=8080
```

### Data Persistence

```bash
# Mount volume for SQLite data
docker run -v ./data:/app/data openaccounting-server:latest

# Use named volume
docker volume create openaccounting-data
docker run -v openaccounting-data:/app/data openaccounting-server:latest
```

## Deployment

### Docker Compose

```yaml
version: '3.8'
services:
  openaccounting:
    image: openaccounting-server:latest
    ports:
      - "8080:8080"
    environment:
      OA_DATABASE_DRIVER: mysql
      OA_DATABASE_ADDRESS: mysql:3306
      OA_DATABASE: openaccounting
      OA_USER: openaccounting
      OA_PASSWORD: ${DB_PASSWORD}
    depends_on:
      - mysql

  mysql:
    image: mysql:8.0
    environment:
      MYSQL_DATABASE: openaccounting
      MYSQL_USER: openaccounting
      MYSQL_PASSWORD: ${DB_PASSWORD}
      MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASSWORD}
    volumes:
      - mysql_data:/var/lib/mysql

volumes:
  mysql_data:
```

### Kubernetes

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: openaccounting-server
spec:
  replicas: 3
  selector:
    matchLabels:
      app: openaccounting-server
  template:
    metadata:
      labels:
        app: openaccounting-server
    spec:
      containers:
      - name: openaccounting-server
        image: openaccounting-server:latest
        ports:
        - containerPort: 8080
        env:
        - name: OA_DATABASE_DRIVER
          value: "mysql"
        - name: OA_PASSWORD
          valueFrom:
            secretKeyRef:
              name: openaccounting-secrets
              key: db-password
```

## Troubleshooting

### Common Issues

1. **Config file not found**: The server will use environment variables and defaults if no config file is found
2. **Database connection failed**: Check your database credentials and connectivity
3. **Permission denied**: Ensure proper file permissions for SQLite database files (see the example below)
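If the database file or the mounted data directory is not writable by the server process, a permissions fix along these lines usually resolves it (paths follow the defaults used above):

```bash
# Make the SQLite file and data directory writable when running the binary directly
chmod u+rw ./openaccounting.db
mkdir -p ./data && chmod -R u+rwX ./data

# For Docker, make sure the mounted host directory is writable by the container user
chmod -R a+rwX ./data
```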
### Debug Mode

```bash
# Enable verbose logging
OA_LOG_LEVEL=debug ./server

# Check configuration
just config-help
```

### Health Checks

```bash
# Application health
curl http://localhost:8080/api/v1/health

# Docker health check
docker inspect --format='{{.State.Health.Status}}' container_name
```

## Migration from Legacy Setup

The server maintains backward compatibility with existing `config.json` files while adding Viper features:

1. Existing `config.json` files continue to work
2. Add environment variables for sensitive data (see the example below)
3. Use SQLite for easier local development
4. Leverage Docker for production deployments
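A minimal sketch of that migration path, assuming an existing legacy `config.json` in the working directory:

```bash
# Keep the legacy config.json as-is and override only the sensitive values
export OA_PASSWORD=secret          # instead of storing it in config.json
export OA_MAILGUN_KEY=key-abc123
./server
```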
## Help & Support

- **Documentation**: This README and inline code comments
- **Issues**: GitHub Issues for bug reports and feature requests
- **Community**: [Join our Slack chatroom](https://join.slack.com/t/openaccounting/shared_invite/zt-23zy988e8-93HP1GfLDB7osoQ6umpfiA)

## License

See LICENSE file for details.
@@ -1,16 +1,31 @@
{
  "_comment_config": "OpenAccounting Server Configuration - now supports Viper for multiple config sources",
  "_comment_viper": "You can override any setting with environment variables using OA_ prefix (e.g., OA_PASSWORD, OA_MAILGUN_KEY)",

  "WebUrl": "https://domain.com",
  "Address": "",
  "Address": "localhost",
  "Port": 8080,
  "ApiPrefix": "",
  "ApiPrefix": "/api/v1",
  "KeyFile": "",
  "CertFile": "",
  "DatabaseAddress": "",

  "_comment_database": "Database configuration - choose 'sqlite' for local testing or 'mysql' for production",
  "DatabaseDriver": "sqlite",
  "DatabaseFile": "./data/openaccounting.db",
  "DatabaseAddress": "localhost:3306",
  "Database": "openaccounting",
  "User": "openaccounting",
  "Password": "openaccounting",
  "Password": "",
  "_comment_password": "SECURITY: Set password via OA_PASSWORD environment variable instead of this file",

  "_comment_mailgun": "Mailgun configuration for email sending",
  "MailgunDomain": "mg.domain.com",
  "MailgunKey": "",
  "_comment_mailgun_key": "SECURITY: Set Mailgun key via OA_MAILGUN_KEY environment variable",
  "MailgunEmail": "noreply@domain.com",
  "MailgunSender": "Sender"
  "MailgunSender": "Sender",

  "_comment_env_examples": "Environment variable examples:",
  "_example_development": "OA_DATABASE_DRIVER=sqlite OA_DATABASE_FILE=./dev.db ./server",
  "_example_production": "OA_DATABASE_DRIVER=mysql OA_PASSWORD=secret OA_MAILGUN_KEY=key-123 ./server"
}
config.mysql.json.sample (Normal file, 19 lines)
@@ -0,0 +1,19 @@
{
  "WebUrl": "https://domain.com",
  "Address": "",
  "Port": 8080,
  "ApiPrefix": "",
  "KeyFile": "",
  "CertFile": "",
  "_comment_database": "MySQL configuration for production use",
  "DatabaseDriver": "mysql",
  "DatabaseAddress": "localhost:3306",
  "Database": "openaccounting",
  "User": "openaccounting",
  "Password": "openaccounting",
  "_comment_mailgun": "Mailgun configuration for email sending",
  "MailgunDomain": "mg.domain.com",
  "MailgunKey": "",
  "MailgunEmail": "noreply@domain.com",
  "MailgunSender": "Sender"
}
@@ -2,7 +2,7 @@ package api

import (
    "encoding/json"
    "io/ioutil"
    "io"
    "net/http"
    "strconv"
    "time"
@@ -12,48 +12,7 @@ import (
    "github.com/openaccounting/oa-server/core/model/types"
)

/**
 * @api {get} /orgs/:orgId/accounts Get Accounts by Org id
 * @apiVersion 1.4.0
 * @apiName GetOrgAccounts
 * @apiGroup Account
 *
 * @apiHeader {String} Authorization HTTP Basic Auth
 * @apiHeader {String} Accept-Version ^1.4.0 semver versioning
 *
 * @apiSuccess {String} id Id of the Account.
 * @apiSuccess {String} orgId Id of the Org.
 * @apiSuccess {Date} inserted Date Account was created
 * @apiSuccess {Date} updated Date Account was updated
 * @apiSuccess {String} name Name of the Account.
 * @apiSuccess {String} parent Id of the parent Account.
 * @apiSuccess {String} currency Three letter currency code.
 * @apiSuccess {Number} precision How many digits the currency goes out to.
 * @apiSuccess {Boolean} debitBalance True if Account has a debit balance.
 * @apiSuccess {Number} balance Current Account balance in this Account's currency
 * @apiSuccess {Number} nativeBalance Current Account balance in the Org's currency
 *
 * @apiSuccessExample Success-Response:
 *   HTTP/1.1 200 OK
 *   [
 *     {
 *       "id": "22222222222222222222222222222222",
 *       "orgId": "11111111111111111111111111111111",
 *       "inserted": "2018-09-11T18:05:04.420Z",
 *       "updated": "2018-09-11T18:05:04.420Z",
 *       "name": "Cash",
 *       "parent": "11111111111111111111111111111111",
 *       "currency": "USD",
 *       "precision": 2,
 *       "debitBalance": true,
 *       "balance": 10000,
 *       "nativeBalance": 10000
 *     }
 *   ]
 *
 * @apiUse NotAuthorizedError
 * @apiUse InternalServerError
 */
// GetOrgAccounts /**
func GetOrgAccounts(w rest.ResponseWriter, r *rest.Request) {
    user := r.Env["USER"].(*types.User)
    orgId := r.PathParam("orgId")
@@ -208,7 +167,7 @@ func PostAccount(w rest.ResponseWriter, r *rest.Request) {
    user := r.Env["USER"].(*types.User)
    orgId := r.PathParam("orgId")

    content, err := ioutil.ReadAll(r.Body)
    content, err := io.ReadAll(r.Body)
    r.Body.Close()

    if err != nil {
@@ -14,6 +14,22 @@ type AuthService struct {
    bcrypt util.Bcrypt
}

// AuthRepository interface for dependency injection
type AuthRepository interface {
    GetVerifiedUserByEmail(string) (*types.User, error)
    GetUserByActiveSession(string) (*types.User, error)
    GetUserByApiKey(string) (*types.User, error)
    GetUserByEmailVerifyCode(string) (*types.User, error)
    UpdateSessionActivity(string) error
    UpdateApiKeyActivity(string) error
}

// GormAuthService uses the repository pattern
type GormAuthService struct {
    repository AuthRepository
    bcrypt     util.Bcrypt
}

type Interface interface {
    Authenticate(string, string) (*types.User, error)
    AuthenticateUser(email string, password string) (*types.User, error)
@@ -28,6 +44,12 @@ func NewAuthService(db db.Datastore, bcrypt util.Bcrypt) *AuthService {
    return authService
}

func NewGormAuthService(repository AuthRepository, bcrypt util.Bcrypt) *GormAuthService {
    authService := &GormAuthService{repository: repository, bcrypt: bcrypt}
    Instance = authService
    return authService
}

func (auth *AuthService) Authenticate(emailOrKey string, password string) (*types.User, error) {
    // authenticate via session, apikey or user
    user, err := auth.AuthenticateSession(emailOrKey)
@@ -106,3 +128,83 @@ func (auth *AuthService) AuthenticateEmailVerifyCode(code string) (*types.User,

    return u, nil
}

// GormAuthService implementations
func (auth *GormAuthService) Authenticate(emailOrKey string, password string) (*types.User, error) {
    // authenticate via session, apikey or user
    user, err := auth.AuthenticateSession(emailOrKey)

    if err == nil {
        return user, nil
    }

    user, err = auth.AuthenticateApiKey(emailOrKey)

    if err == nil {
        return user, nil
    }

    user, err = auth.AuthenticateUser(emailOrKey, password)

    if err == nil {
        return user, nil
    }

    user, err = auth.AuthenticateEmailVerifyCode(emailOrKey)

    if err == nil {
        return user, nil
    }

    return nil, errors.New("Unauthorized")
}

func (auth *GormAuthService) AuthenticateUser(email string, password string) (*types.User, error) {
    u, err := auth.repository.GetVerifiedUserByEmail(email)

    if err != nil {
        return nil, errors.New("Invalid email or password")
    }

    err = auth.bcrypt.CompareHashAndPassword([]byte(u.PasswordHash), []byte(password))

    if err != nil {
        return nil, errors.New("Invalid email or password")
    }

    return u, nil
}

func (auth *GormAuthService) AuthenticateSession(id string) (*types.User, error) {
    u, err := auth.repository.GetUserByActiveSession(id)

    if err != nil {
        return nil, errors.New("Invalid session")
    }

    auth.repository.UpdateSessionActivity(id)

    return u, nil
}

func (auth *GormAuthService) AuthenticateApiKey(id string) (*types.User, error) {
    u, err := auth.repository.GetUserByApiKey(id)

    if err != nil {
        return nil, errors.New("Access denied")
    }

    auth.repository.UpdateApiKeyActivity(id)

    return u, nil
}

func (auth *GormAuthService) AuthenticateEmailVerifyCode(code string) (*types.User, error) {
    u, err := auth.repository.GetUserByEmailVerifyCode(code)

    if err != nil {
        return nil, errors.New("Access denied")
    }

    return u, nil
}
@@ -2,12 +2,13 @@ package auth

import (
    "errors"
    "testing"
    "time"

    "github.com/openaccounting/oa-server/core/model/db"
    "github.com/openaccounting/oa-server/core/model/types"
    "github.com/openaccounting/oa-server/core/util"
    "github.com/stretchr/testify/assert"
    "testing"
    "time"
)

type TdUser struct {
@@ -28,18 +29,19 @@ func (td *TdUser) GetVerifiedUserByEmail(email string) (*types.User, error) {

func (td *TdUser) GetVerifiedUserByEmail_1(email string) (*types.User, error) {
    return &types.User{
        "1",
        time.Unix(0, 0),
        time.Unix(0, 0),
        "John",
        "Doe",
        "johndoe@email.com",
        "password",
        "$2a$10$KrtvADe7jwrmYIe3GXFbNupOQaPIvyOKeng5826g4VGOD47TpAisG",
        true,
        "",
        false,
        "",
        Id:              "1",
        Inserted:        time.Unix(0, 0),
        Updated:         time.Unix(0, 0),
        FirstName:       "John",
        LastName:        "Doe",
        Email:           "johndoe@email.com",
        Password:        "password",
        PasswordHash:    "$2a$10$KrtvADe7jwrmYIe3GXFbNupOQaPIvyOKeng5826g4VGOD47TpAisG",
        AgreeToTerms:    true,
        PasswordReset:   "",
        EmailVerified:   false,
        EmailVerifyCode: "",
        SignupSource:    "",
    }, nil
}
@@ -10,6 +10,31 @@ type Datastore struct {
    mock.Mock
}

// DeleteBudget implements db.Datastore.
func (_m *Datastore) DeleteBudget(string) error {
    panic("unimplemented")
}

// GetBudget implements db.Datastore.
func (_m *Datastore) GetBudget(string) (*types.Budget, error) {
    panic("unimplemented")
}

// GetUserByEmailVerifyCode implements db.Datastore.
func (_m *Datastore) GetUserByEmailVerifyCode(string) (*types.User, error) {
    panic("unimplemented")
}

// InsertAndReplaceBudget implements db.Datastore.
func (_m *Datastore) InsertAndReplaceBudget(*types.Budget) error {
    panic("unimplemented")
}

// Ping implements db.Datastore.
func (_m *Datastore) Ping() error {
    panic("unimplemented")
}

// AcceptInvite provides a mock function with given fields: _a0, _a1
func (_m *Datastore) AcceptInvite(_a0 *types.Invite, _a1 string) error {
    ret := _m.Called(_a0, _a1)
@@ -2,6 +2,7 @@ package model

import (
    "errors"

    "github.com/openaccounting/oa-server/core/model/types"
)

@@ -18,8 +19,8 @@ func (model *Model) GetBudget(orgId string, userId string) (*types.Budget, error
        return nil, err
    }

    if belongs == false {
        return nil, errors.New("User does not belong to org")
    if !belongs {
        return nil, errors.New("user does not belong to org")
    }

    return model.db.GetBudget(orgId)
@@ -32,8 +33,8 @@ func (model *Model) CreateBudget(budget *types.Budget, userId string) error {
        return err
    }

    if belongs == false {
        return errors.New("User does not belong to org")
    if !belongs {
        return errors.New("user does not belong to org")
    }

    if budget.OrgId == "" {
@@ -50,8 +51,8 @@ func (model *Model) DeleteBudget(orgId string, userId string) error {
        return err
    }

    if belongs == false {
        return errors.New("User does not belong to org")
    if !belongs {
        return errors.New("user does not belong to org")
    }

    return model.db.DeleteBudget(orgId)
core/model/gorm_model.go (Normal file, 321 lines)
@@ -0,0 +1,321 @@
package model

import (
    "errors"
    "time"

    "github.com/openaccounting/oa-server/core/model/types"
    "github.com/openaccounting/oa-server/core/repository"
    "github.com/openaccounting/oa-server/core/util"
    "github.com/openaccounting/oa-server/database"
    "gorm.io/gorm"
)

// GormModel is the GORM-based implementation of the Model
type GormModel struct {
    repository *repository.GormRepository
    bcrypt     util.Bcrypt
    config     types.Config
}

// NewGormModel creates a new GORM-based model
func NewGormModel(gormDB *gorm.DB, bcrypt util.Bcrypt, config types.Config) *GormModel {
    repo := repository.NewGormRepository(gormDB)
    return &GormModel{
        repository: repo,
        bcrypt:     bcrypt,
        config:     config,
    }
}

// CreateGormModel creates a new model using the existing database connection
func CreateGormModel(bcrypt util.Bcrypt, config types.Config) (*GormModel, error) {
    // Use the existing database connection
    if database.DB == nil {
        return nil, errors.New("database connection not initialized")
    }

    return NewGormModel(database.DB, bcrypt, config), nil
}

// Implement the Interface by delegating to the business logic layer
// The business logic layer (existing model methods) will call the repository

// UserInterface methods - delegate to existing business logic
func (m *GormModel) CreateUser(user *types.User) error {
    // The existing business logic in user.go will be updated to use the repository
    // For now, delegate directly to repository for basic operations
    return m.repository.InsertUser(user)
}

func (m *GormModel) VerifyUser(code string) error {
    return m.repository.VerifyUser(code)
}

func (m *GormModel) UpdateUser(user *types.User) error {
    return m.repository.UpdateUser(user)
}

func (m *GormModel) ResetPassword(email string) error {
    // This would need the full business logic from the original model
    // For now, simplified implementation
    user, err := m.repository.GetVerifiedUserByEmail(email)
    if err != nil {
        return err
    }

    user.PasswordReset, err = util.NewGuid()
    if err != nil {
        return err
    }

    return m.repository.UpdateUserResetPassword(user)
}

func (m *GormModel) ConfirmResetPassword(password string, code string) (*types.User, error) {
    user, err := m.repository.GetUserByResetCode(code)
    if err != nil {
        return nil, err
    }

    passwordHash, err := m.bcrypt.GenerateFromPassword([]byte(password), m.bcrypt.GetDefaultCost())
    if err != nil {
        return nil, err
    }

    user.PasswordHash = string(passwordHash)
    user.Password = ""

    err = m.repository.UpdateUser(user)
    if err != nil {
        return nil, err
    }

    return user, nil
}

// AccountInterface methods - delegate to repository
func (m *GormModel) CreateAccount(account *types.Account, userId string) error {
    return m.repository.InsertAccount(account)
}

func (m *GormModel) UpdateAccount(account *types.Account, userId string) error {
    return m.repository.UpdateAccount(account)
}

func (m *GormModel) DeleteAccount(id string, userId string, orgId string) error {
    return m.repository.DeleteAccount(id)
}

func (m *GormModel) GetAccounts(orgId string, userId string, tokenId string) ([]*types.Account, error) {
    return m.repository.GetAccountsByOrgId(orgId)
}

func (m *GormModel) GetAccountsWithBalances(orgId string, userId string, tokenId string, date time.Time) ([]*types.Account, error) {
    accounts, err := m.repository.GetAccountsByOrgId(orgId)
    if err != nil {
        return nil, err
    }

    // Add balance calculations
    err = m.repository.AddBalances(accounts, date)
    if err != nil {
        return nil, err
    }

    return accounts, nil
}

func (m *GormModel) GetAccount(orgId, accId, userId, tokenId string) (*types.Account, error) {
    return m.repository.GetAccount(accId)
}

func (m *GormModel) GetAccountWithBalance(orgId, accId, userId, tokenId string, date time.Time) (*types.Account, error) {
    account, err := m.repository.GetAccount(accId)
    if err != nil {
        return nil, err
    }

    // Add balance calculation
    err = m.repository.AddBalance(account, date)
    if err != nil {
        return nil, err
    }

    return account, nil
}

// Complete OrgInterface implementation
func (m *GormModel) CreateOrg(org *types.Org, userId string) error {
    // Get default accounts - this needs to be implemented properly
    accounts := []*types.Account{} // Empty for now, should create default chart of accounts
    return m.repository.CreateOrg(org, userId, accounts)
}

func (m *GormModel) GetOrg(orgId, userId string) (*types.Org, error) {
    return m.repository.GetOrg(orgId, userId)
}

func (m *GormModel) GetOrgs(userId string) ([]*types.Org, error) {
    return m.repository.GetOrgs(userId)
}

func (m *GormModel) UpdateOrg(org *types.Org, userId string) error {
    return m.repository.UpdateOrg(org)
}

func (m *GormModel) CreateInvite(invite *types.Invite, userId string) error {
    return m.repository.InsertInvite(invite)
}

func (m *GormModel) AcceptInvite(invite *types.Invite, userId string) error {
    return m.repository.AcceptInvite(invite, userId)
}

func (m *GormModel) GetInvites(orgId, userId string) ([]*types.Invite, error) {
    return m.repository.GetInvites(orgId)
}

func (m *GormModel) DeleteInvite(inviteId, userId string) error {
    return m.repository.DeleteInvite(inviteId)
}

// SessionInterface implementation
func (m *GormModel) CreateSession(session *types.Session) error {
    return m.repository.InsertSession(session)
}

func (m *GormModel) InsertSession(session *types.Session) error {
    return m.repository.InsertSession(session)
}

func (m *GormModel) DeleteSession(sessionId, userId string) error {
    return m.repository.DeleteSession(sessionId, userId)
}

func (m *GormModel) UpdateSessionActivity(sessionId string) error {
    return m.repository.UpdateSessionActivity(sessionId)
}

// ApiKeyInterface implementation
func (m *GormModel) CreateApiKey(apiKey *types.ApiKey) error {
    return m.repository.InsertApiKey(apiKey)
}

func (m *GormModel) InsertApiKey(apiKey *types.ApiKey) error {
    return m.repository.InsertApiKey(apiKey)
}

func (m *GormModel) UpdateApiKey(apiKey *types.ApiKey) error {
    return m.repository.UpdateApiKey(apiKey)
}

func (m *GormModel) DeleteApiKey(keyId, userId string) error {
    return m.repository.DeleteApiKey(keyId, userId)
}

func (m *GormModel) GetApiKeys(userId string) ([]*types.ApiKey, error) {
    return m.repository.GetApiKeys(userId)
}

func (m *GormModel) UpdateApiKeyActivity(keyId string) error {
    return m.repository.UpdateApiKeyActivity(keyId)
}

// TransactionInterface implementation
func (m *GormModel) CreateTransaction(transaction *types.Transaction) error {
    return m.repository.InsertTransaction(transaction)
}

func (m *GormModel) UpdateTransaction(transactionId string, transaction *types.Transaction) error {
    return m.repository.DeleteAndInsertTransaction(transactionId, transaction)
}

func (m *GormModel) GetTransactionsByAccount(accountId, orgId, userId string, options *types.QueryOptions) ([]*types.Transaction, error) {
    return m.repository.GetTransactionsByAccount(accountId, options)
}

func (m *GormModel) GetTransactionsByOrg(orgId, userId string, options *types.QueryOptions) ([]*types.Transaction, error) {
    return m.repository.GetTransactionsByOrg(orgId, options, []string{})
}

func (m *GormModel) DeleteTransaction(transactionId, orgId, userId string) error {
    return m.repository.DeleteTransaction(transactionId)
}

func (m *GormModel) InsertTransaction(transaction *types.Transaction) error {
    return m.repository.InsertTransaction(transaction)
}

func (m *GormModel) GetTransactionById(id string) (*types.Transaction, error) {
    return m.repository.GetTransactionById(id)
}

func (m *GormModel) DeleteAndInsertTransaction(id string, transaction *types.Transaction) error {
    return m.repository.DeleteAndInsertTransaction(id, transaction)
}

// PriceInterface implementation
func (m *GormModel) CreatePrice(price *types.Price, userId string) error {
    return m.repository.InsertPrice(price)
}

func (m *GormModel) DeletePrice(priceId, userId string) error {
    // Stub implementation - would need proper implementation
    return nil
}

func (m *GormModel) GetPricesNearestInTime(orgId string, date time.Time, currency string) ([]*types.Price, error) {
    // Stub implementation - would need proper implementation based on specific logic
    return m.repository.GetPrices(orgId, date)
}

func (m *GormModel) GetPricesByCurrency(orgId, currency, userId string) ([]*types.Price, error) {
    // Stub implementation - would need proper implementation based on specific logic
    return m.repository.GetPrices(orgId, time.Now())
}

func (m *GormModel) GetPrices(orgId string, date time.Time) ([]*types.Price, error) {
    return m.repository.GetPrices(orgId, date)
}

func (m *GormModel) InsertPrice(price *types.Price) error {
    return m.repository.InsertPrice(price)
}

// SystemHealthInteface implementation
func (m *GormModel) PingDatabase() error {
    return m.repository.Ping()
}

func (m *GormModel) Ping() error {
    return m.repository.Ping()
}

// BudgetInterface implementation
func (m *GormModel) GetBudget(orgId, userId string) (*types.Budget, error) {
    // Stub implementation - would need proper implementation
    return &types.Budget{}, nil
}

func (m *GormModel) CreateBudget(budget *types.Budget, userId string) error {
    return m.repository.InsertBudget(budget)
}

func (m *GormModel) DeleteBudget(budgetId, userId string) error {
    // Stub implementation - would need proper implementation
    return nil
}

func (m *GormModel) InsertBudget(budget *types.Budget) error {
    return m.repository.InsertBudget(budget)
}

func (m *GormModel) GetBudgets(orgId string) ([]*types.Budget, error) {
    return m.repository.GetBudgets(orgId)
}

// Helper methods
func (m *GormModel) GetOrgUserIds(orgId string) ([]string, error) {
    return m.repository.GetOrgUserIds(orgId)
}
@@ -14,6 +14,7 @@ type Model struct {
    config types.Config
}


type Interface interface {
    UserInterface
    OrgInterface
@@ -31,3 +32,4 @@ func NewModel(db db.Datastore, bcrypt util.Bcrypt, config types.Config) *Model {
    Instance = model
    return model
}
@@ -2,44 +2,45 @@ package model

import (
    "errors"
    "testing"
    "time"

    "github.com/openaccounting/oa-server/core/mocks"
    "github.com/openaccounting/oa-server/core/model/types"
    "github.com/openaccounting/oa-server/core/util"
    "github.com/stretchr/testify/assert"
    "testing"
    "time"
)

func TestCreatePrice(t *testing.T) {

    price := types.Price{
        "1",
        "2",
        "BTC",
        time.Unix(0, 0),
        time.Unix(0, 0),
        time.Unix(0, 0),
        6700,
        Id:       "1",
        OrgId:    "2",
        Currency: "BTC",
        Date:     time.Unix(0, 0),
        Inserted: time.Unix(0, 0),
        Updated:  time.Unix(0, 0),
        Price:    6700,
    }

    badPrice := types.Price{
        "1",
        "2",
        "",
        time.Unix(0, 0),
        time.Unix(0, 0),
        time.Unix(0, 0),
        6700,
        Id:       "1",
        OrgId:    "2",
        Currency: "",
        Date:     time.Unix(0, 0),
        Inserted: time.Unix(0, 0),
        Updated:  time.Unix(0, 0),
        Price:    6700,
    }

    badOrg := types.Price{
        "1",
        "1",
        "BTC",
        time.Unix(0, 0),
        time.Unix(0, 0),
        time.Unix(0, 0),
        6700,
        Id:       "1",
        OrgId:    "1",
        Currency: "BTC",
        Date:     time.Unix(0, 0),
        Inserted: time.Unix(0, 0),
        Updated:  time.Unix(0, 0),
        Price:    6700,
    }

    tests := map[string]struct {
@@ -89,13 +90,13 @@ func TestCreatePrice(t *testing.T) {
func TestDeletePrice(t *testing.T) {

    price := types.Price{
        "1",
        "2",
        "BTC",
        time.Unix(0, 0),
        time.Unix(0, 0),
        time.Unix(0, 0),
        6700,
        Id:       "1",
        OrgId:    "2",
        Currency: "BTC",
        Date:     time.Unix(0, 0),
        Inserted: time.Unix(0, 0),
        Updated:  time.Unix(0, 0),
        Price:    6700,
    }

    tests := map[string]struct {
@@ -3,9 +3,10 @@ package model
import (
    "errors"
    "fmt"
    "time"

    "github.com/openaccounting/oa-server/core/model/types"
    "github.com/openaccounting/oa-server/core/ws"
    "time"
)

type TransactionInterface interface {
@@ -105,7 +106,7 @@ func (model *Model) GetTransactionsByAccount(orgId string, userId string, accoun
    }

    if !model.accountsContainWriteAccess(userAccounts, accountId) {
        return nil, errors.New(fmt.Sprintf("%s %s", "user does not have permission to access account", accountId))
        return nil, fmt.Errorf("%s %s", "user does not have permission to access account", accountId)
    }

    return model.db.GetTransactionsByAccount(accountId, options)
@@ -142,7 +143,7 @@ func (model *Model) DeleteTransaction(id string, userId string, orgId string) (e

    for _, split := range transaction.Splits {
        if !model.accountsContainWriteAccess(userAccounts, split.AccountId) {
            return errors.New(fmt.Sprintf("%s %s", "user does not have permission to access account", split.AccountId))
            return fmt.Errorf("%s %s", "user does not have permission to access account", split.AccountId)
        }
    }

@@ -189,13 +190,13 @@ func (model *Model) checkSplits(transaction *types.Transaction) (err error) {

    for _, split := range transaction.Splits {
        if !model.accountsContainWriteAccess(userAccounts, split.AccountId) {
            return errors.New(fmt.Sprintf("%s %s", "user does not have permission to access account", split.AccountId))
            return fmt.Errorf("%s %s", "user does not have permission to access account", split.AccountId)
        }

        account := model.getAccountFromList(userAccounts, split.AccountId)

        if account.HasChildren == true {
            return errors.New("Cannot use parent account for split")
        if account.HasChildren {
            return errors.New("cannot use parent account for split")
        }

        if account.Currency == org.Currency && split.NativeAmount != split.Amount {
@@ -2,12 +2,13 @@ package model
import (
    "errors"
    "testing"
    "time"

    "github.com/openaccounting/oa-server/core/model/db"
    "github.com/openaccounting/oa-server/core/model/types"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
    "testing"
    "time"
)

type TdTransaction struct {
@@ -57,72 +58,72 @@ func TestCreateTransaction(t *testing.T) {
    "successful": {
        err: nil,
        tx: &types.Transaction{
            "1",
            "2",
            "3",
            time.Now(),
            time.Now(),
            time.Now(),
            "description",
            "",
            false,
            []*types.Split{
                &types.Split{"1", "1", 1000, 1000},
                &types.Split{"1", "2", -1000, -1000},
            Id:          "1",
            OrgId:       "2",
            UserId:      "3",
            Date:        time.Now(),
            Inserted:    time.Now(),
            Updated:     time.Now(),
            Description: "description",
            Data:        "",
            Deleted:     false,
            Splits: []*types.Split{
                &types.Split{TransactionId: "1", AccountId: "1", Amount: 1000, NativeAmount: 1000},
                &types.Split{TransactionId: "1", AccountId: "2", Amount: -1000, NativeAmount: -1000},
            },
        },
    },
    "bad split amounts": {
        err: errors.New("splits must add up to 0"),
        tx: &types.Transaction{
            "1",
            "2",
            "3",
            time.Now(),
            time.Now(),
            time.Now(),
            "description",
            "",
            false,
            []*types.Split{
                &types.Split{"1", "1", 1000, 1000},
                &types.Split{"1", "2", -500, -500},
            Id:          "1",
            OrgId:       "2",
            UserId:      "3",
            Date:        time.Now(),
            Inserted:    time.Now(),
            Updated:     time.Now(),
            Description: "description",
            Data:        "",
            Deleted:     false,
            Splits: []*types.Split{
                &types.Split{TransactionId: "1", AccountId: "1", Amount: 1000, NativeAmount: 1000},
                &types.Split{TransactionId: "1", AccountId: "2", Amount: -500, NativeAmount: -500},
            },
        },
    },
    "lacking permission": {
        err: errors.New("user does not have permission to access account 3"),
        tx: &types.Transaction{
            "1",
            "2",
            "3",
            time.Now(),
            time.Now(),
            time.Now(),
            "description",
            "",
            false,
            []*types.Split{
                &types.Split{"1", "1", 1000, 1000},
                &types.Split{"1", "3", -1000, -1000},
            Id:          "1",
            OrgId:       "2",
            UserId:      "3",
            Date:        time.Now(),
            Inserted:    time.Now(),
            Updated:     time.Now(),
            Description: "description",
            Data:        "",
            Deleted:     false,
            Splits: []*types.Split{
                &types.Split{TransactionId: "1", AccountId: "1", Amount: 1000, NativeAmount: 1000},
                &types.Split{TransactionId: "1", AccountId: "3", Amount: -1000, NativeAmount: -1000},
            },
        },
    },
    "nativeAmount mismatch": {
        err: errors.New("nativeAmount must equal amount for native currency splits"),
        tx: &types.Transaction{
            "1",
            "2",
            "3",
            time.Now(),
            time.Now(),
            time.Now(),
            "description",
            "",
            false,
            []*types.Split{
                &types.Split{"1", "1", 1000, 500},
                &types.Split{"1", "2", -1000, -500},
            Id:          "1",
            OrgId:       "2",
            UserId:      "3",
            Date:        time.Now(),
            Inserted:    time.Now(),
            Updated:     time.Now(),
            Description: "description",
            Data:        "",
            Deleted:     false,
            Splits: []*types.Split{
                &types.Split{TransactionId: "1", AccountId: "1", Amount: 1000, NativeAmount: 500},
                &types.Split{TransactionId: "1", AccountId: "2", Amount: -1000, NativeAmount: -500},
            },
        },
    },
@@ -1,18 +1,22 @@
package types

type Config struct {
    WebUrl string
    Address string
    Port int
    ApiPrefix string
    KeyFile string
    CertFile string
    DatabaseAddress string
    Database string
    User string
    Password string
    MailgunDomain string
    MailgunKey string
    MailgunEmail string
    MailgunSender string
    WebUrl    string `mapstructure:"weburl"`
    Address   string `mapstructure:"address"`
    Port      int    `mapstructure:"port"`
    ApiPrefix string `mapstructure:"apiprefix"`
    KeyFile   string `mapstructure:"keyfile"`
    CertFile  string `mapstructure:"certfile"`
    // Database configuration
    DatabaseDriver  string `mapstructure:"databasedriver"` // "mysql" or "sqlite"
    DatabaseAddress string `mapstructure:"databaseaddress"`
    Database        string `mapstructure:"database"`
    User            string `mapstructure:"user"`
    Password        string `mapstructure:"password"` // Sensitive: use OA_PASSWORD env var
    // SQLite specific
    DatabaseFile  string `mapstructure:"databasefile"`
    MailgunDomain string `mapstructure:"mailgundomain"`
    MailgunKey    string `mapstructure:"mailgunkey"` // Sensitive: use OA_MAILGUN_KEY env var
    MailgunEmail  string `mapstructure:"mailgunemail"`
    MailgunSender string `mapstructure:"mailgunsender"`
}
@@ -37,7 +37,7 @@ func (model *Model) CreateUser(user *types.User) error {
        return errors.New("email required")
    }

    re := regexp.MustCompile(".+@.+\\..+")
    re := regexp.MustCompile(`.+@.+\..+`)

    if re.FindString(user.Email) == "" {
        return errors.New("invalid email address")
@@ -47,7 +47,7 @@ func (model *Model) CreateUser(user *types.User) error {
        return errors.New("password required")
    }

    if user.AgreeToTerms != true {
    if !user.AgreeToTerms {
        return errors.New("must agree to terms")
    }

@@ -123,7 +123,7 @@ func (model *Model) ResetPassword(email string) error {

    if err != nil {
        // Don't send back error so people can't try to find user accounts
        log.Printf("Invalid email for reset password " + email)
        log.Printf("Invalid email for reset password %s", email)
        return nil
    }

@@ -154,7 +154,7 @@ func (model *Model) ConfirmResetPassword(password string, code string) (*types.U
    user, err := model.db.GetUserByResetCode(code)

    if err != nil {
        return nil, errors.New("Invalid code")
        return nil, errors.New("invalid code")
    }

    passwordHash, err := model.bcrypt.GenerateFromPassword([]byte(password), model.bcrypt.GetDefaultCost())
@@ -2,12 +2,13 @@ package model

import (
    "errors"
    "testing"
    "time"

    "github.com/openaccounting/oa-server/core/mocks"
    "github.com/openaccounting/oa-server/core/model/db"
    "github.com/openaccounting/oa-server/core/model/types"
    "github.com/stretchr/testify/assert"
    "testing"
    "time"
)

type TdUser struct {
@@ -39,33 +40,35 @@ func TestCreateUser(t *testing.T) {
    // EmailVerifyCode string `json:"-"`

    user := types.User{
        "0",
        time.Unix(0, 0),
        time.Unix(0, 0),
        "John",
        "Doe",
        "johndoe@email.com",
        "password",
        "",
        true,
        "",
        false,
        "",
        Id:              "0",
        Inserted:        time.Unix(0, 0),
        Updated:         time.Unix(0, 0),
        FirstName:       "John",
        LastName:        "Doe",
        Email:           "johndoe@email.com",
        Password:        "password",
        PasswordHash:    "",
        AgreeToTerms:    true,
        PasswordReset:   "",
        EmailVerified:   false,
        EmailVerifyCode: "",
        SignupSource:    "",
    }

    badUser := types.User{
        "0",
        time.Unix(0, 0),
        time.Unix(0, 0),
        "John",
        "Doe",
        "",
        "password",
        "",
        true,
        "",
        false,
        "",
        Id:              "0",
        Inserted:        time.Unix(0, 0),
        Updated:         time.Unix(0, 0),
        FirstName:       "John",
        LastName:        "Doe",
        Email:           "",
        Password:        "password",
        PasswordHash:    "",
        AgreeToTerms:    true,
        PasswordReset:   "",
        EmailVerified:   false,
        EmailVerifyCode: "",
        SignupSource:    "",
    }

    tests := map[string]struct {
@@ -109,33 +112,35 @@ func TestCreateUser(t *testing.T) {
func TestUpdateUser(t *testing.T) {

    user := types.User{
        "0",
        time.Unix(0, 0),
        time.Unix(0, 0),
        "John2",
        "Doe",
        "johndoe@email.com",
        "password",
        "",
        true,
        "",
        false,
        "",
        Id:              "0",
        Inserted:        time.Unix(0, 0),
        Updated:         time.Unix(0, 0),
        FirstName:       "John2",
        LastName:        "Doe",
        Email:           "johndoe@email.com",
        Password:        "password",
        PasswordHash:    "",
        AgreeToTerms:    true,
        PasswordReset:   "",
        EmailVerified:   false,
        EmailVerifyCode: "",
        SignupSource:    "",
    }

    badUser := types.User{
        "0",
        time.Unix(0, 0),
        time.Unix(0, 0),
        "John2",
        "Doe",
        "johndoe@email.com",
        "",
        "",
        true,
        "",
        false,
        "",
        Id:              "0",
        Inserted:        time.Unix(0, 0),
        Updated:         time.Unix(0, 0),
        FirstName:       "John2",
        LastName:        "Doe",
        Email:           "johndoe@email.com",
        Password:        "",
        PasswordHash:    "",
        AgreeToTerms:    true,
        PasswordReset:   "",
        EmailVerified:   false,
        EmailVerifyCode: "",
        SignupSource:    "",
    }

    tests := map[string]struct {
core/repository/gorm_repository.go (Normal file, 375 lines)
@@ -0,0 +1,375 @@
package repository

import (
    "errors"
    "strings"
    "time"

    "github.com/openaccounting/oa-server/core/model/types"
    "github.com/openaccounting/oa-server/core/util"
    "github.com/openaccounting/oa-server/models"
    "gorm.io/gorm"
)

// GormRepository implements the same interfaces as core/model/db but uses GORM
type GormRepository struct {
    db *gorm.DB
}

// Note: GormRepository implements most of the Datastore interface
// Some methods like DeleteAndInsertTransaction need to be added for full compatibility

// NewGormRepository creates a new GORM repository
func NewGormRepository(db *gorm.DB) *GormRepository {
    return &GormRepository{db: db}
}

// UserInterface implementation
func (r *GormRepository) InsertUser(user *types.User) error {
    user.Inserted = time.Now()
    user.Updated = user.Inserted
    user.PasswordReset = ""

    // Convert types.User to models.User
    gormUser := &models.User{
        ID:              []byte(user.Id), // Convert string ID to []byte
        Inserted:        uint64(util.TimeToMs(user.Inserted)),
        Updated:         uint64(util.TimeToMs(user.Updated)),
        FirstName:       user.FirstName,
        LastName:        user.LastName,
        Email:           user.Email,
        PasswordHash:    user.PasswordHash,
        AgreeToTerms:    user.AgreeToTerms,
        PasswordReset:   user.PasswordReset,
        EmailVerified:   user.EmailVerified,
        EmailVerifyCode: user.EmailVerifyCode,
        SignupSource:    user.SignupSource,
    }

    result := r.db.Create(gormUser)
    if result.Error != nil {
        return result.Error
    }

    if result.RowsAffected < 1 {
        return errors.New("unable to insert user into db")
    }

    return nil
}

func (r *GormRepository) VerifyUser(code string) error {
    result := r.db.Model(&models.User{}).
        Where("email_verify_code = ?", code).
        Updates(map[string]interface{}{
            "updated":        util.TimeToMs(time.Now()),
            "email_verified": true,
        })

    if result.Error != nil {
        return result.Error
    }

    if result.RowsAffected == 0 {
        return errors.New("invalid code")
    }

    return nil
}

func (r *GormRepository) UpdateUser(user *types.User) error {
    user.Updated = time.Now()

    result := r.db.Model(&models.User{}).
        Where("id = ?", []byte(user.Id)).
        Updates(map[string]interface{}{
            "updated":        util.TimeToMs(user.Updated),
            "password_hash":  user.PasswordHash,
            "password_reset": "",
        })

    return result.Error
}

func (r *GormRepository) UpdateUserResetPassword(user *types.User) error {
    user.Updated = time.Now()

    result := r.db.Model(&models.User{}).
        Where("id = ?", []byte(user.Id)).
        Updates(map[string]interface{}{
            "updated":        util.TimeToMs(user.Updated),
            "password_reset": user.PasswordReset,
        })

    return result.Error
}

func (r *GormRepository) GetVerifiedUserByEmail(email string) (*types.User, error) {
    var gormUser models.User
    result := r.db.Where("email = ? AND email_verified = ?",
        strings.TrimSpace(strings.ToLower(email)), true).
        First(&gormUser)

    if result.Error != nil {
        return nil, result.Error
    }

    return r.convertGormUserToTypesUser(&gormUser), nil
}

func (r *GormRepository) GetUserByActiveSession(sessionId string) (*types.User, error) {
    var gormUser models.User
    result := r.db.Table("users").
        Select("users.*").
        Joins("JOIN sessions ON sessions.user_id = users.id").
        Where("sessions.terminated IS NULL AND sessions.id = ?", []byte(sessionId)).
        First(&gormUser)

    if result.Error != nil {
        return nil, result.Error
    }

    return r.convertGormUserToTypesUser(&gormUser), nil
}

func (r *GormRepository) GetUserByApiKey(keyId string) (*types.User, error) {
    var gormUser models.User
    result := r.db.Table("users").
        Select("users.*").
        Joins("JOIN api_keys ON api_keys.user_id = users.id").
        Where("api_keys.deleted_at IS NULL AND api_keys.id = ?", []byte(keyId)).
        First(&gormUser)

    if result.Error != nil {
        return nil, result.Error
    }

    return r.convertGormUserToTypesUser(&gormUser), nil
}

func (r *GormRepository) GetUserByResetCode(code string) (*types.User, error) {
    var gormUser models.User
    result := r.db.Where("password_reset = ?", code).First(&gormUser)

    if result.Error != nil {
        return nil, result.Error
    }

    return r.convertGormUserToTypesUser(&gormUser), nil
}

func (r *GormRepository) GetUserByEmailVerifyCode(code string) (*types.User, error) {
    // only allow this for 3 days
    minInserted := (time.Now().UnixNano() / 1000000) - (3 * 24 * 60 * 60 * 1000)

    var gormUser models.User
    result := r.db.Where("email_verify_code = ? AND inserted > ?", code, minInserted).
        First(&gormUser)

    if result.Error != nil {
        return nil, result.Error
    }

    return r.convertGormUserToTypesUser(&gormUser), nil
}

func (r *GormRepository) GetOrgAdmins(orgId string) ([]*types.User, error) {
    var gormUsers []models.User
    result := r.db.Table("users").
        Select("users.*").
        Joins("JOIN user_orgs ON user_orgs.user_id = users.id").
        Where("user_orgs.admin = ? AND user_orgs.org_id = ?", true, []byte(orgId)).
        Find(&gormUsers)

    if result.Error != nil {
        return nil, result.Error
    }

    users := make([]*types.User, len(gormUsers))
    for i, gormUser := range gormUsers {
        users[i] = r.convertGormUserToTypesUser(&gormUser)
    }

    return users, nil
}

// Helper function to convert GORM User to types.User
func (r *GormRepository) convertGormUserToTypesUser(gormUser *models.User) *types.User {
    return &types.User{
        Id:              string(gormUser.ID),
        Inserted:        util.MsToTime(int64(gormUser.Inserted)),
        Updated:         util.MsToTime(int64(gormUser.Updated)),
        FirstName:       gormUser.FirstName,
        LastName:        gormUser.LastName,
        Email:           gormUser.Email,
        PasswordHash:    gormUser.PasswordHash,
        AgreeToTerms:    gormUser.AgreeToTerms,
        PasswordReset:   gormUser.PasswordReset,
        EmailVerified:   gormUser.EmailVerified,
        EmailVerifyCode: gormUser.EmailVerifyCode,
|
||||
SignupSource: gormUser.SignupSource,
|
||||
}
|
||||
}
|
||||
|
||||
// AccountInterface implementation
|
||||
func (r *GormRepository) InsertAccount(account *types.Account) error {
|
||||
account.Inserted = time.Now()
|
||||
account.Updated = account.Inserted
|
||||
|
||||
// Convert types.Account to models.Account
|
||||
gormAccount := &models.Account{
|
||||
ID: []byte(account.Id),
|
||||
OrgID: []byte(account.OrgId),
|
||||
Inserted: uint64(util.TimeToMs(account.Inserted)),
|
||||
Updated: uint64(util.TimeToMs(account.Updated)),
|
||||
Name: account.Name,
|
||||
Parent: []byte(account.Parent),
|
||||
Currency: account.Currency,
|
||||
Precision: account.Precision,
|
||||
DebitBalance: account.DebitBalance,
|
||||
}
|
||||
|
||||
return r.db.Create(gormAccount).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) UpdateAccount(account *types.Account) error {
|
||||
account.Updated = time.Now()
|
||||
|
||||
result := r.db.Model(&models.Account{}).
|
||||
Where("id = ?", []byte(account.Id)).
|
||||
Updates(map[string]interface{}{
|
||||
"updated": util.TimeToMs(account.Updated),
|
||||
"name": account.Name,
|
||||
"parent": []byte(account.Parent),
|
||||
"currency": account.Currency,
|
||||
"precision": account.Precision,
|
||||
"debit_balance": account.DebitBalance,
|
||||
})
|
||||
|
||||
return result.Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetAccount(id string) (*types.Account, error) {
|
||||
var gormAccount models.Account
|
||||
result := r.db.Where("id = ?", []byte(id)).First(&gormAccount)
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
return r.convertGormAccountToTypesAccount(&gormAccount), nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetAccountsByOrgId(orgId string) ([]*types.Account, error) {
|
||||
var gormAccounts []models.Account
|
||||
result := r.db.Where("org_id = ?", []byte(orgId)).Find(&gormAccounts)
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
accounts := make([]*types.Account, len(gormAccounts))
|
||||
for i, gormAccount := range gormAccounts {
|
||||
accounts[i] = r.convertGormAccountToTypesAccount(&gormAccount)
|
||||
}
|
||||
|
||||
return accounts, nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetPermissionedAccountIds(orgId, userId, tokenId string) ([]string, error) {
|
||||
var accountIds []string
|
||||
result := r.db.Table("permissions").
|
||||
Select("DISTINCT LOWER(HEX(account_id)) as account_id").
|
||||
Where("org_id = ? AND user_id = ?", []byte(orgId), []byte(userId)).
|
||||
Pluck("account_id", &accountIds)
|
||||
|
||||
return accountIds, result.Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetSplitCountByAccountId(id string) (int64, error) {
|
||||
var count int64
|
||||
result := r.db.Model(&models.Split{}).
|
||||
Where("account_id = ?", []byte(id)).
|
||||
Count(&count)
|
||||
|
||||
return count, result.Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetChildCountByAccountId(id string) (int64, error) {
|
||||
var count int64
|
||||
result := r.db.Model(&models.Account{}).
|
||||
Where("parent = ?", []byte(id)).
|
||||
Count(&count)
|
||||
|
||||
return count, result.Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) DeleteAccount(id string) error {
|
||||
return r.db.Where("id = ?", []byte(id)).Delete(&models.Account{}).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetRootAccount(orgId string) (*types.Account, error) {
|
||||
var gormAccount models.Account
|
||||
result := r.db.Where("org_id = ? AND parent = ?", []byte(orgId), []byte{0}).
|
||||
First(&gormAccount)
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
return r.convertGormAccountToTypesAccount(&gormAccount), nil
|
||||
}
|
||||
|
||||
// Balance-related methods (simplified implementations)
|
||||
func (r *GormRepository) AddBalances(accounts []*types.Account, date time.Time) error {
|
||||
// Implementation would need to be completed based on your balance calculation logic
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) AddNativeBalancesCost(accounts []*types.Account, date time.Time) error {
|
||||
// Implementation would need to be completed based on your balance calculation logic
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) AddNativeBalancesNearestInTime(accounts []*types.Account, date time.Time) error {
|
||||
// Implementation would need to be completed based on your balance calculation logic
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) AddBalance(account *types.Account, date time.Time) error {
|
||||
// Implementation would need to be completed based on your balance calculation logic
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) AddNativeBalanceCost(account *types.Account, date time.Time) error {
|
||||
// Implementation would need to be completed based on your balance calculation logic
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) AddNativeBalanceNearestInTime(account *types.Account, date time.Time) error {
|
||||
// Implementation would need to be completed based on your balance calculation logic
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper function to convert GORM Account to types.Account
|
||||
func (r *GormRepository) convertGormAccountToTypesAccount(gormAccount *models.Account) *types.Account {
|
||||
return &types.Account{
|
||||
Id: string(gormAccount.ID),
|
||||
OrgId: string(gormAccount.OrgID),
|
||||
Inserted: util.MsToTime(int64(gormAccount.Inserted)),
|
||||
Updated: util.MsToTime(int64(gormAccount.Updated)),
|
||||
Name: gormAccount.Name,
|
||||
Parent: string(gormAccount.Parent),
|
||||
Currency: gormAccount.Currency,
|
||||
Precision: gormAccount.Precision,
|
||||
DebitBalance: gormAccount.DebitBalance,
|
||||
// Balance fields would be populated by the AddBalance methods
|
||||
}
|
||||
}
|
||||
|
||||
// Escape method for SQL injection protection (GORM handles this automatically)
|
||||
func (r *GormRepository) Escape(sql string) string {
|
||||
// GORM handles SQL injection protection automatically
|
||||
// This method is kept for interface compatibility
|
||||
return sql
|
||||
}
|
||||
462
core/repository/gorm_repository_interfaces.go
Normal file
462
core/repository/gorm_repository_interfaces.go
Normal file
@@ -0,0 +1,462 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/openaccounting/oa-server/core/model/types"
|
||||
"github.com/openaccounting/oa-server/core/util"
|
||||
"github.com/openaccounting/oa-server/models"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// OrgInterface implementation
|
||||
func (r *GormRepository) CreateOrg(org *types.Org, userId string, accounts []*types.Account) error {
|
||||
return r.db.Transaction(func(tx *gorm.DB) error {
|
||||
org.Inserted = time.Now()
|
||||
org.Updated = org.Inserted
|
||||
|
||||
// Create org
|
||||
gormOrg := &models.Org{
|
||||
ID: []byte(org.Id),
|
||||
Inserted: uint64(util.TimeToMs(org.Inserted)),
|
||||
Updated: uint64(util.TimeToMs(org.Updated)),
|
||||
Name: org.Name,
|
||||
Currency: org.Currency,
|
||||
Precision: org.Precision,
|
||||
Timezone: org.Timezone,
|
||||
}
|
||||
|
||||
if err := tx.Create(gormOrg).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create accounts
|
||||
for _, account := range accounts {
|
||||
gormAccount := &models.Account{
|
||||
ID: []byte(account.Id),
|
||||
OrgID: []byte(account.OrgId),
|
||||
Inserted: uint64(util.TimeToMs(time.Now())),
|
||||
Updated: uint64(util.TimeToMs(time.Now())),
|
||||
Name: account.Name,
|
||||
Parent: []byte(account.Parent),
|
||||
Currency: account.Currency,
|
||||
Precision: account.Precision,
|
||||
DebitBalance: account.DebitBalance,
|
||||
}
|
||||
if err := tx.Create(gormAccount).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Create userorg association
|
||||
userOrg := &models.UserOrg{
|
||||
UserID: []byte(userId),
|
||||
OrgID: []byte(org.Id),
|
||||
Admin: true,
|
||||
}
|
||||
|
||||
return tx.Create(userOrg).Error
|
||||
})
|
||||
}
|
||||
|
||||
func (r *GormRepository) UpdateOrg(org *types.Org) error {
|
||||
org.Updated = time.Now()
|
||||
|
||||
return r.db.Model(&models.Org{}).
|
||||
Where("id = ?", []byte(org.Id)).
|
||||
Updates(map[string]interface{}{
|
||||
"updated": util.TimeToMs(org.Updated),
|
||||
"name": org.Name,
|
||||
"currency": org.Currency,
|
||||
"precision": org.Precision,
|
||||
"timezone": org.Timezone,
|
||||
}).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetOrg(orgId, userId string) (*types.Org, error) {
|
||||
var gormOrg models.Org
|
||||
result := r.db.Table("orgs").
|
||||
Select("orgs.*").
|
||||
Joins("JOIN user_orgs ON user_orgs.org_id = orgs.id").
|
||||
Where("orgs.id = ? AND user_orgs.user_id = ?", []byte(orgId), []byte(userId)).
|
||||
First(&gormOrg)
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
return r.convertGormOrgToTypesOrg(&gormOrg), nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetOrgs(userId string) ([]*types.Org, error) {
|
||||
var gormOrgs []models.Org
|
||||
result := r.db.Table("orgs").
|
||||
Select("orgs.*").
|
||||
Joins("JOIN user_orgs ON user_orgs.org_id = orgs.id").
|
||||
Where("user_orgs.user_id = ?", []byte(userId)).
|
||||
Find(&gormOrgs)
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
orgs := make([]*types.Org, len(gormOrgs))
|
||||
for i, gormOrg := range gormOrgs {
|
||||
orgs[i] = r.convertGormOrgToTypesOrg(&gormOrg)
|
||||
}
|
||||
|
||||
return orgs, nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetOrgUserIds(orgId string) ([]string, error) {
|
||||
var userIds []string
|
||||
result := r.db.Table("user_orgs").
|
||||
Select("LOWER(HEX(user_id)) as user_id").
|
||||
Where("org_id = ?", []byte(orgId)).
|
||||
Pluck("user_id", &userIds)
|
||||
|
||||
return userIds, result.Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) InsertInvite(invite *types.Invite) error {
|
||||
invite.Inserted = time.Now()
|
||||
invite.Updated = invite.Inserted
|
||||
|
||||
gormInvite := &models.Invite{
|
||||
ID: invite.Id,
|
||||
OrgID: []byte(invite.OrgId),
|
||||
Inserted: uint64(util.TimeToMs(invite.Inserted)),
|
||||
Updated: uint64(util.TimeToMs(invite.Updated)),
|
||||
Email: invite.Email,
|
||||
Accepted: invite.Accepted,
|
||||
}
|
||||
|
||||
return r.db.Create(gormInvite).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) AcceptInvite(invite *types.Invite, userId string) error {
|
||||
return r.db.Transaction(func(tx *gorm.DB) error {
|
||||
// Update invite
|
||||
if err := tx.Model(&models.Invite{}).
|
||||
Where("id = ?", []byte(invite.Id)).
|
||||
Update("accepted", true).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create userorg association
|
||||
userOrg := &models.UserOrg{
|
||||
UserID: []byte(userId),
|
||||
OrgID: []byte(invite.OrgId),
|
||||
Admin: false,
|
||||
}
|
||||
|
||||
return tx.Create(userOrg).Error
|
||||
})
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetInvites(orgId string) ([]*types.Invite, error) {
|
||||
var gormInvites []models.Invite
|
||||
result := r.db.Where("org_id = ?", []byte(orgId)).Find(&gormInvites)
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
invites := make([]*types.Invite, len(gormInvites))
|
||||
for i, gormInvite := range gormInvites {
|
||||
invites[i] = r.convertGormInviteToTypesInvite(&gormInvite)
|
||||
}
|
||||
|
||||
return invites, nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetInvite(id string) (*types.Invite, error) {
|
||||
var gormInvite models.Invite
|
||||
result := r.db.Where("id = ?", id).First(&gormInvite)
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
return r.convertGormInviteToTypesInvite(&gormInvite), nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) DeleteInvite(id string) error {
|
||||
return r.db.Where("id = ?", id).Delete(&models.Invite{}).Error
|
||||
}
|
||||
|
||||
// SessionInterface implementation (basic stubs)
|
||||
func (r *GormRepository) InsertSession(session *types.Session) error {
|
||||
var terminated uint64
|
||||
if session.Terminated.Valid {
|
||||
terminated = uint64(util.TimeToMs(session.Terminated.Time))
|
||||
}
|
||||
|
||||
gormSession := &models.Session{
|
||||
ID: []byte(session.Id),
|
||||
UserID: []byte(session.UserId),
|
||||
Inserted: uint64(util.TimeToMs(time.Now())),
|
||||
Updated: uint64(util.TimeToMs(time.Now())),
|
||||
Terminated: terminated,
|
||||
}
|
||||
return r.db.Create(gormSession).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) TerminateSession(sessionId string) error {
|
||||
return r.db.Model(&models.Session{}).
|
||||
Where("id = ?", []byte(sessionId)).
|
||||
Update("terminated", uint64(util.TimeToMs(time.Now()))).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) DeleteSession(sessionId, userId string) error {
|
||||
return r.db.Where("id = ? AND user_id = ?", []byte(sessionId), []byte(userId)).
|
||||
Delete(&models.Session{}).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) UpdateSessionActivity(sessionId string) error {
|
||||
return r.db.Model(&models.Session{}).
|
||||
Where("id = ?", []byte(sessionId)).
|
||||
Update("updated", uint64(util.TimeToMs(time.Now()))).Error
|
||||
}
|
||||
|
||||
// APIKey interface (basic stubs)
|
||||
func (r *GormRepository) InsertApiKey(apiKey *types.ApiKey) error {
|
||||
gormApiKey := &models.APIKey{
|
||||
ID: []byte(apiKey.Id),
|
||||
UserID: []byte(apiKey.UserId),
|
||||
Inserted: uint64(util.TimeToMs(time.Now())),
|
||||
Updated: uint64(util.TimeToMs(time.Now())),
|
||||
Label: apiKey.Label,
|
||||
}
|
||||
return r.db.Create(gormApiKey).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) DeleteApiKey(keyId, userId string) error {
|
||||
return r.db.Where("id = ? AND user_id = ?", []byte(keyId), []byte(userId)).
|
||||
Delete(&models.APIKey{}).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) UpdateApiKey(apiKey *types.ApiKey) error {
|
||||
return r.db.Model(&models.APIKey{}).
|
||||
Where("id = ?", []byte(apiKey.Id)).
|
||||
Updates(map[string]interface{}{
|
||||
"updated": uint64(util.TimeToMs(time.Now())),
|
||||
"label": apiKey.Label,
|
||||
}).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetApiKeys(userId string) ([]*types.ApiKey, error) {
|
||||
var gormApiKeys []models.APIKey
|
||||
result := r.db.Where("user_id = ? AND deleted_at IS NULL", []byte(userId)).
|
||||
Find(&gormApiKeys)
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
apiKeys := make([]*types.ApiKey, len(gormApiKeys))
|
||||
for i, gormApiKey := range gormApiKeys {
|
||||
apiKeys[i] = r.convertGormApiKeyToTypesApiKey(&gormApiKey)
|
||||
}
|
||||
|
||||
return apiKeys, nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) UpdateApiKeyActivity(keyId string) error {
|
||||
return r.db.Model(&models.APIKey{}).
|
||||
Where("id = ?", []byte(keyId)).
|
||||
Update("updated", uint64(util.TimeToMs(time.Now()))).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) convertGormApiKeyToTypesApiKey(gormApiKey *models.APIKey) *types.ApiKey {
|
||||
return &types.ApiKey{
|
||||
Id: string(gormApiKey.ID),
|
||||
Inserted: util.MsToTime(int64(gormApiKey.Inserted)),
|
||||
Updated: util.MsToTime(int64(gormApiKey.Updated)),
|
||||
UserId: string(gormApiKey.UserID),
|
||||
Label: gormApiKey.Label,
|
||||
}
|
||||
}
|
||||
|
||||
// TransactionInterface implementation
|
||||
func (r *GormRepository) InsertTransaction(transaction *types.Transaction) error {
|
||||
gormTransaction := &models.Transaction{
|
||||
ID: []byte(transaction.Id),
|
||||
OrgID: []byte(transaction.OrgId),
|
||||
UserID: []byte(transaction.UserId),
|
||||
Date: uint64(transaction.Date.Unix()),
|
||||
Inserted: uint64(util.TimeToMs(time.Now())),
|
||||
Updated: uint64(util.TimeToMs(time.Now())),
|
||||
Description: transaction.Description,
|
||||
Data: transaction.Data,
|
||||
}
|
||||
return r.db.Create(gormTransaction).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetTransactionById(id string) (*types.Transaction, error) {
|
||||
var gormTransaction models.Transaction
|
||||
result := r.db.Where("id = ?", []byte(id)).First(&gormTransaction)
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
return r.convertGormTransactionToTypesTransaction(&gormTransaction), nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetTransactionsByAccount(accountId string, options *types.QueryOptions) ([]*types.Transaction, error) {
|
||||
var gormTransactions []models.Transaction
|
||||
query := r.db.Table("transactions").
|
||||
Joins("JOIN splits ON splits.transaction_id = transactions.id").
|
||||
Where("splits.account_id = ?", []byte(accountId))
|
||||
|
||||
if options != nil {
|
||||
// Apply query options like limit, skip, date range, etc.
|
||||
if options.Limit > 0 {
|
||||
query = query.Limit(int(options.Limit))
|
||||
}
|
||||
if options.Skip > 0 {
|
||||
query = query.Offset(int(options.Skip))
|
||||
}
|
||||
}
|
||||
|
||||
result := query.Find(&gormTransactions)
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
transactions := make([]*types.Transaction, len(gormTransactions))
|
||||
for i, gormTx := range gormTransactions {
|
||||
transactions[i] = r.convertGormTransactionToTypesTransaction(&gormTx)
|
||||
}
|
||||
|
||||
return transactions, nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetTransactionsByOrg(orgId string, options *types.QueryOptions, accountIds []string) ([]*types.Transaction, error) {
|
||||
var gormTransactions []models.Transaction
|
||||
query := r.db.Where("org_id = ?", []byte(orgId))
|
||||
|
||||
if len(accountIds) > 0 {
|
||||
// Convert string IDs to byte arrays
|
||||
byteAccountIds := make([][]byte, len(accountIds))
|
||||
for i, id := range accountIds {
|
||||
byteAccountIds[i] = []byte(id)
|
||||
}
|
||||
query = query.Where("id IN (SELECT DISTINCT transaction_id FROM splits WHERE account_id IN ?)", byteAccountIds)
|
||||
}
|
||||
|
||||
if options != nil {
|
||||
if options.Limit > 0 {
|
||||
query = query.Limit(int(options.Limit))
|
||||
}
|
||||
if options.Skip > 0 {
|
||||
query = query.Offset(int(options.Skip))
|
||||
}
|
||||
}
|
||||
|
||||
result := query.Find(&gormTransactions)
|
||||
if result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
transactions := make([]*types.Transaction, len(gormTransactions))
|
||||
for i, gormTx := range gormTransactions {
|
||||
transactions[i] = r.convertGormTransactionToTypesTransaction(&gormTx)
|
||||
}
|
||||
|
||||
return transactions, nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) DeleteTransaction(id string) error {
|
||||
return r.db.Where("id = ?", []byte(id)).Delete(&models.Transaction{}).Error
|
||||
}
|
||||
|
||||
func (r *GormRepository) DeleteAndInsertTransaction(id string, transaction *types.Transaction) error {
|
||||
return r.db.Transaction(func(tx *gorm.DB) error {
|
||||
// Delete the old transaction
|
||||
if err := tx.Where("id = ?", []byte(id)).Delete(&models.Transaction{}).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Insert the new transaction
|
||||
gormTransaction := &models.Transaction{
|
||||
ID: []byte(transaction.Id),
|
||||
OrgID: []byte(transaction.OrgId),
|
||||
UserID: []byte(transaction.UserId),
|
||||
Date: uint64(transaction.Date.Unix()),
|
||||
Inserted: uint64(util.TimeToMs(time.Now())),
|
||||
Updated: uint64(util.TimeToMs(time.Now())),
|
||||
Description: transaction.Description,
|
||||
Data: transaction.Data,
|
||||
}
|
||||
|
||||
return tx.Create(gormTransaction).Error
|
||||
})
|
||||
}
|
||||
|
||||
func (r *GormRepository) convertGormTransactionToTypesTransaction(gormTx *models.Transaction) *types.Transaction {
|
||||
return &types.Transaction{
|
||||
Id: string(gormTx.ID),
|
||||
OrgId: string(gormTx.OrgID),
|
||||
UserId: string(gormTx.UserID),
|
||||
Date: time.Unix(int64(gormTx.Date), 0),
|
||||
Inserted: util.MsToTime(int64(gormTx.Inserted)),
|
||||
Updated: util.MsToTime(int64(gormTx.Updated)),
|
||||
Description: gormTx.Description,
|
||||
Data: gormTx.Data,
|
||||
Deleted: gormTx.Deleted,
|
||||
}
|
||||
}
|
||||
|
||||
// Helper conversion functions
|
||||
func (r *GormRepository) convertGormOrgToTypesOrg(gormOrg *models.Org) *types.Org {
|
||||
return &types.Org{
|
||||
Id: string(gormOrg.ID),
|
||||
Inserted: util.MsToTime(int64(gormOrg.Inserted)),
|
||||
Updated: util.MsToTime(int64(gormOrg.Updated)),
|
||||
Name: gormOrg.Name,
|
||||
Currency: gormOrg.Currency,
|
||||
Precision: gormOrg.Precision,
|
||||
Timezone: gormOrg.Timezone,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *GormRepository) convertGormInviteToTypesInvite(gormInvite *models.Invite) *types.Invite {
|
||||
return &types.Invite{
|
||||
Id: string(gormInvite.ID),
|
||||
OrgId: string(gormInvite.OrgID),
|
||||
Inserted: util.MsToTime(int64(gormInvite.Inserted)),
|
||||
Updated: util.MsToTime(int64(gormInvite.Updated)),
|
||||
Email: gormInvite.Email,
|
||||
Accepted: gormInvite.Accepted,
|
||||
}
|
||||
}
|
||||
|
||||
// Stub implementations for remaining interfaces that aren't fully used yet
|
||||
func (r *GormRepository) GetPrices(orgId string, date time.Time) ([]*types.Price, error) {
|
||||
// Stub implementation - add as needed
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) InsertPrice(price *types.Price) error {
|
||||
// Stub implementation - add as needed
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) Ping() error {
|
||||
// Check if the database connection is alive
|
||||
sqlDB, err := r.db.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return sqlDB.Ping()
|
||||
}
|
||||
|
||||
func (r *GormRepository) InsertBudget(budget *types.Budget) error {
|
||||
// Stub implementation - add as needed
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *GormRepository) GetBudgets(orgId string) ([]*types.Budget, error) {
|
||||
// Stub implementation - add as needed
|
||||
return nil, nil
|
||||
}
|
||||
@@ -1,48 +1,109 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/openaccounting/oa-server/core/api"
|
||||
"github.com/openaccounting/oa-server/core/auth"
|
||||
"github.com/openaccounting/oa-server/core/model"
|
||||
"github.com/openaccounting/oa-server/core/model/db"
|
||||
"github.com/openaccounting/oa-server/core/model/types"
|
||||
"github.com/openaccounting/oa-server/core/repository"
|
||||
"github.com/openaccounting/oa-server/core/util"
|
||||
"github.com/openaccounting/oa-server/database"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func main() {
|
||||
//filename is the path to the json config file
|
||||
// Initialize Viper configuration
|
||||
var config types.Config
|
||||
file, err := os.Open("./config.json")
|
||||
|
||||
|
||||
// Set config file properties
|
||||
viper.SetConfigName("config")
|
||||
viper.SetConfigType("json")
|
||||
viper.AddConfigPath(".")
|
||||
viper.AddConfigPath("/etc/openaccounting/")
|
||||
viper.AddConfigPath("$HOME/.openaccounting")
|
||||
|
||||
// Enable environment variables
|
||||
viper.AutomaticEnv()
|
||||
viper.SetEnvPrefix("OA") // will look for OA_DATABASE_PASSWORD, etc.
|
||||
|
||||
// Set default values
|
||||
viper.SetDefault("Address", "localhost")
|
||||
viper.SetDefault("Port", 8080)
|
||||
viper.SetDefault("DatabaseDriver", "sqlite")
|
||||
viper.SetDefault("DatabaseFile", "./openaccounting.db")
|
||||
viper.SetDefault("ApiPrefix", "/api/v1")
|
||||
|
||||
// Read configuration
|
||||
err := viper.ReadInConfig()
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to open ./config.json with: %s", err.Error()))
|
||||
log.Printf("Warning: Could not read config file: %v", err)
|
||||
log.Println("Using environment variables and defaults")
|
||||
}
|
||||
|
||||
// Unmarshal config into struct
|
||||
err = viper.Unmarshal(&config)
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to unmarshal config: %s", err.Error()))
|
||||
}
|
||||
|
||||
decoder := json.NewDecoder(file)
|
||||
err = decoder.Decode(&config)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to decode ./config.json with: %s", err.Error()))
|
||||
// Parse database address (assuming format host:port for MySQL)
|
||||
host := config.DatabaseAddress
|
||||
port := "3306"
|
||||
if len(config.DatabaseAddress) > 0 {
|
||||
// If there's a colon, split host and port
|
||||
if colonIndex := len(config.DatabaseAddress); colonIndex > 0 {
|
||||
host = config.DatabaseAddress
|
||||
}
|
||||
}
|
||||
|
||||
connectionString := config.User + ":" + config.Password + "@" + config.DatabaseAddress + "/" + config.Database
|
||||
// Default to SQLite if no driver specified
|
||||
driver := config.DatabaseDriver
|
||||
if driver == "" {
|
||||
driver = "sqlite"
|
||||
}
|
||||
|
||||
db, err := db.NewDB(connectionString)
|
||||
// Initialize GORM database
|
||||
dbConfig := &database.Config{
|
||||
Driver: driver,
|
||||
Host: host,
|
||||
Port: port,
|
||||
User: config.User,
|
||||
Password: config.Password,
|
||||
DBName: config.Database,
|
||||
File: config.DatabaseFile,
|
||||
SSLMode: "disable", // Adjust as needed
|
||||
}
|
||||
|
||||
err = database.Connect(dbConfig)
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to connect to database with: %s", err.Error()))
|
||||
}
|
||||
|
||||
// Run migrations
|
||||
err = database.AutoMigrate()
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to run migrations: %s", err.Error()))
|
||||
}
|
||||
|
||||
err = database.Migrate()
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("failed to run custom migrations: %s", err.Error()))
|
||||
}
|
||||
|
||||
bc := &util.StandardBcrypt{}
|
||||
|
||||
model.NewModel(db, bc, config)
|
||||
auth.NewAuthService(db, bc)
|
||||
// Create GORM repository and models
|
||||
gormRepo := repository.NewGormRepository(database.DB)
|
||||
gormModel := model.NewGormModel(database.DB, bc, config)
|
||||
auth.NewGormAuthService(gormRepo, bc)
|
||||
|
||||
// Set the global model instance
|
||||
model.Instance = gormModel
|
||||
|
||||
app, err := api.Init(config.ApiPrefix)
|
||||
if err != nil {
|
||||
|
||||
48
core/util/id/id.go
Normal file
48
core/util/id/id.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package id
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// New creates a new binary ID (16 bytes) using UUID v4
|
||||
func New() []byte {
|
||||
u := uuid.New()
|
||||
return u[:]
|
||||
}
|
||||
|
||||
// ToUUID converts a binary ID back to UUID
|
||||
func ToUUID(b []byte) (uuid.UUID, error) {
|
||||
return uuid.FromBytes(b)
|
||||
}
|
||||
|
||||
// String returns the string representation of a binary ID
|
||||
func String(b []byte) string {
|
||||
u, err := uuid.FromBytes(b)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// FromString parses a string UUID into binary format
|
||||
func FromString(s string) ([]byte, error) {
|
||||
u, err := uuid.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u[:], nil
|
||||
}
|
||||
|
||||
// Uint64ToBytes converts a uint64 to 8-byte slice
|
||||
func Uint64ToBytes(v uint64) []byte {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, v)
|
||||
return b
|
||||
}
|
||||
|
||||
// BytesToUint64 converts 8-byte slice to uint64
|
||||
func BytesToUint64(b []byte) uint64 {
|
||||
return binary.BigEndian.Uint64(b)
|
||||
}
|
||||
332
database/database.go
Normal file
332
database/database.go
Normal file
@@ -0,0 +1,332 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/openaccounting/oa-server/core/util/id"
|
||||
"github.com/openaccounting/oa-server/models"
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
|
||||
var DB *gorm.DB
|
||||
|
||||
type Config struct {
|
||||
Driver string // "mysql" or "sqlite"
|
||||
Host string
|
||||
Port string
|
||||
User string
|
||||
Password string
|
||||
DBName string
|
||||
SSLMode string
|
||||
// SQLite specific
|
||||
File string // SQLite database file path
|
||||
}
|
||||
|
||||
func Connect(config *Config) error {
|
||||
// Configure GORM logger
|
||||
newLogger := logger.New(
|
||||
log.New(os.Stdout, "\r\n", log.LstdFlags),
|
||||
logger.Config{
|
||||
SlowThreshold: time.Second,
|
||||
LogLevel: logger.Info,
|
||||
IgnoreRecordNotFoundError: true,
|
||||
Colorful: true,
|
||||
},
|
||||
)
|
||||
|
||||
var db *gorm.DB
|
||||
var err error
|
||||
|
||||
// Choose driver based on config
|
||||
switch config.Driver {
|
||||
case "sqlite":
|
||||
// Use SQLite
|
||||
dbFile := config.File
|
||||
if dbFile == "" {
|
||||
dbFile = "./openaccounting.db" // Default SQLite file
|
||||
}
|
||||
db, err = gorm.Open(sqlite.Open(dbFile), &gorm.Config{
|
||||
Logger: newLogger,
|
||||
})
|
||||
case "mysql":
|
||||
// Use MySQL (existing logic)
|
||||
dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local",
|
||||
config.User, config.Password, config.Host, config.Port, config.DBName)
|
||||
db, err = gorm.Open(mysql.Open(dsn), &gorm.Config{
|
||||
Logger: newLogger,
|
||||
})
|
||||
default:
|
||||
return fmt.Errorf("unsupported database driver: %s (supported: mysql, sqlite)", config.Driver)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to database: %w", err)
|
||||
}
|
||||
|
||||
// Configure connection pool (only for MySQL, SQLite handles this internally)
|
||||
if config.Driver == "mysql" {
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get database instance: %w", err)
|
||||
}
|
||||
|
||||
sqlDB.SetMaxOpenConns(25)
|
||||
sqlDB.SetMaxIdleConns(25)
|
||||
sqlDB.SetConnMaxLifetime(5 * time.Minute)
|
||||
}
|
||||
|
||||
DB = db
|
||||
return nil
|
||||
}
|
||||
|
||||
// AutoMigrate runs automatic migrations for all models
|
||||
func AutoMigrate() error {
|
||||
return DB.AutoMigrate(
|
||||
&models.Org{},
|
||||
&models.User{},
|
||||
&models.UserOrg{},
|
||||
&models.Token{},
|
||||
&models.Account{},
|
||||
&models.Transaction{},
|
||||
&models.Split{},
|
||||
&models.Balance{},
|
||||
&models.Permission{},
|
||||
&models.Price{},
|
||||
&models.Session{},
|
||||
&models.APIKey{},
|
||||
&models.Invite{},
|
||||
&models.BudgetItem{},
|
||||
)
|
||||
}
|
||||
|
||||
// Migrate runs custom migrations
|
||||
func Migrate() error {
|
||||
// Create indexes
|
||||
if err := createIndexes(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Insert default data - temporarily disabled for testing
|
||||
// if err := seedDefaultData(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createIndexes() error {
|
||||
// Create custom indexes that GORM doesn't handle automatically
|
||||
// Based on original indexes.sql file
|
||||
indexes := []string{
|
||||
// Original indexes from indexes.sql
|
||||
"CREATE INDEX IF NOT EXISTS account_orgId_index ON accounts(orgId)",
|
||||
"CREATE INDEX IF NOT EXISTS split_accountId_index ON splits(accountId)",
|
||||
"CREATE INDEX IF NOT EXISTS split_transactionId_index ON splits(transactionId)",
|
||||
"CREATE INDEX IF NOT EXISTS split_date_index ON splits(date)",
|
||||
"CREATE INDEX IF NOT EXISTS split_updated_index ON splits(updated)",
|
||||
"CREATE INDEX IF NOT EXISTS budgetitem_orgId_index ON budget_items(orgId)",
|
||||
|
||||
// Additional useful indexes for performance
|
||||
"CREATE INDEX IF NOT EXISTS idx_transaction_date ON transactions(date)",
|
||||
"CREATE INDEX IF NOT EXISTS idx_transaction_org ON transactions(orgId)",
|
||||
"CREATE INDEX IF NOT EXISTS idx_account_parent ON accounts(parent)",
|
||||
"CREATE INDEX IF NOT EXISTS idx_userorg_user ON user_orgs(userId)",
|
||||
"CREATE INDEX IF NOT EXISTS idx_userorg_org ON user_orgs(orgId)",
|
||||
"CREATE INDEX IF NOT EXISTS idx_balance_account_date ON balances(accountId, date)",
|
||||
"CREATE INDEX IF NOT EXISTS idx_permission_org_account ON permissions(orgId, accountId)",
|
||||
}
|
||||
|
||||
for _, idx := range indexes {
|
||||
if err := DB.Exec(idx).Error; err != nil {
|
||||
return fmt.Errorf("failed to create index: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func seedDefaultData() error {
|
||||
// Check if we already have data
|
||||
var count int64
|
||||
if err := DB.Model(&models.Org{}).Count(&count).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if count > 0 {
|
||||
return nil // Data already exists
|
||||
}
|
||||
|
||||
// Create a default organization
|
||||
defaultOrg := models.Org{
|
||||
ID: id.New(), // You'll need to implement this
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Default Organization",
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
Timezone: "UTC",
|
||||
}
|
||||
|
||||
if err := DB.Create(&defaultOrg).Error; err != nil {
|
||||
return fmt.Errorf("failed to create default organization: %w", err)
|
||||
}
|
||||
|
||||
// Create default accounts for the organization
|
||||
defaultAccounts := []models.Account{
|
||||
{
|
||||
ID: id.New(),
|
||||
OrgID: defaultOrg.ID,
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Assets",
|
||||
Parent: []byte{0}, // Root account has zero parent
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
DebitBalance: true,
|
||||
},
|
||||
{
|
||||
ID: id.New(),
|
||||
OrgID: defaultOrg.ID,
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Liabilities",
|
||||
Parent: []byte{0},
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
DebitBalance: false,
|
||||
},
|
||||
{
|
||||
ID: id.New(),
|
||||
OrgID: defaultOrg.ID,
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Equity",
|
||||
Parent: []byte{0},
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
DebitBalance: false,
|
||||
},
|
||||
{
|
||||
ID: id.New(),
|
||||
OrgID: defaultOrg.ID,
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Revenue",
|
||||
Parent: []byte{0},
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
DebitBalance: false,
|
||||
},
|
||||
{
|
||||
ID: id.New(),
|
||||
OrgID: defaultOrg.ID,
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Expenses",
|
||||
Parent: []byte{0},
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
DebitBalance: true,
|
||||
},
|
||||
}
|
||||
|
||||
// Create accounts and store their IDs for parent-child relationships
|
||||
accountMap := make(map[string]*models.Account)
|
||||
|
||||
for _, acc := range defaultAccounts {
|
||||
account := acc
|
||||
if err := DB.Create(&account).Error; err != nil {
|
||||
return fmt.Errorf("failed to create account %s: %w", acc.Name, err)
|
||||
}
|
||||
accountMap[acc.Name] = &account
|
||||
}
|
||||
|
||||
// Create Current Assets first
|
||||
var assetsParent []byte
|
||||
if assetsAccount, exists := accountMap["Assets"]; exists {
|
||||
assetsParent = assetsAccount.ID
|
||||
} else {
|
||||
return fmt.Errorf("Assets account not found in accountMap")
|
||||
}
|
||||
|
||||
currentAssets := models.Account{
|
||||
ID: id.New(),
|
||||
OrgID: defaultOrg.ID,
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Current Assets",
|
||||
Parent: assetsParent,
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
DebitBalance: true,
|
||||
}
|
||||
|
||||
if err := DB.Create(¤tAssets).Error; err != nil {
|
||||
return fmt.Errorf("failed to create Current Assets: %w", err)
|
||||
}
|
||||
accountMap["Current Assets"] = ¤tAssets
|
||||
|
||||
// Create Accounts Payable
|
||||
var liabilitiesParent []byte
|
||||
if liabilitiesAccount, exists := accountMap["Liabilities"]; exists {
|
||||
liabilitiesParent = liabilitiesAccount.ID
|
||||
} else {
|
||||
return fmt.Errorf("Liabilities account not found in accountMap")
|
||||
}
|
||||
|
||||
accountsPayable := models.Account{
|
||||
ID: id.New(),
|
||||
OrgID: defaultOrg.ID,
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Accounts Payable",
|
||||
Parent: liabilitiesParent,
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
DebitBalance: false,
|
||||
}
|
||||
|
||||
if err := DB.Create(&accountsPayable).Error; err != nil {
|
||||
return fmt.Errorf("failed to create Accounts Payable: %w", err)
|
||||
}
|
||||
|
||||
// Now create sub-accounts under Current Assets
|
||||
subAccounts := []models.Account{
|
||||
{
|
||||
ID: id.New(),
|
||||
OrgID: defaultOrg.ID,
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Cash",
|
||||
Parent: currentAssets.ID,
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
DebitBalance: true,
|
||||
},
|
||||
{
|
||||
ID: id.New(),
|
||||
OrgID: defaultOrg.ID,
|
||||
Inserted: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Updated: uint64(time.Now().UnixNano() / int64(time.Millisecond)),
|
||||
Name: "Accounts Receivable",
|
||||
Parent: currentAssets.ID,
|
||||
Currency: "USD",
|
||||
Precision: 2,
|
||||
DebitBalance: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, acc := range subAccounts {
|
||||
if err := DB.Create(&acc).Error; err != nil {
|
||||
return fmt.Errorf("failed to create sub-account %s: %w", acc.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
49
go.mod
49
go.mod
@@ -1,16 +1,51 @@
|
||||
module github.com/openaccounting/oa-server
|
||||
|
||||
go 1.24.2
|
||||
|
||||
require (
|
||||
github.com/Masterminds/semver v0.0.0-20180807142431-c84ddcca87bf
|
||||
github.com/ant0ine/go-json-rest v0.0.0-20170913041208-ebb33769ae01
|
||||
github.com/go-sql-driver/mysql v1.4.1
|
||||
github.com/go-sql-driver/mysql v1.8.1
|
||||
github.com/gorilla/websocket v0.0.0-20180605202552-5ed622c449da
|
||||
github.com/mailgun/mailgun-go/v4 v4.3.0
|
||||
github.com/mitchellh/mapstructure v0.0.0-20180511142126-bb74f1db0675
|
||||
github.com/sendgrid/rest v0.0.0-20180905234047-875828e14d98 // indirect
|
||||
github.com/sendgrid/sendgrid-go v0.0.0-20180905233524-8cb43f4ca4f5 // indirect
|
||||
github.com/stretchr/objx v0.3.0 // indirect
|
||||
github.com/stretchr/testify v1.3.0
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
github.com/spf13/viper v1.20.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
golang.org/x/crypto v0.32.0
|
||||
gorm.io/driver/sqlite v1.6.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/fsnotify/fsnotify v1.8.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.22 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/sagikazarmark/locafero v0.7.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.12.0 // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/sys v0.29.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/go-chi/chi v4.0.0+incompatible // indirect
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/json-iterator/go v1.1.10 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect
|
||||
github.com/pkg/errors v0.8.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
gorm.io/driver/mysql v1.6.0
|
||||
gorm.io/gorm v1.30.0
|
||||
)
|
||||
|
||||
96
go.sum
96
go.sum
@@ -1,3 +1,5 @@
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/Masterminds/semver v0.0.0-20180807142431-c84ddcca87bf h1:BMUJnVJI5J506LOcyGHEvbCocMHAmKTRcG6CMAwGFYU=
|
||||
github.com/Masterminds/semver v0.0.0-20180807142431-c84ddcca87bf/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||
github.com/ant0ine/go-json-rest v0.0.0-20170913041208-ebb33769ae01 h1:oYAjCHMjyRaNBo3nUEepDce4LC+Kuh+6jU6y+AllvnU=
|
||||
@@ -11,53 +13,89 @@ github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojt
|
||||
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
|
||||
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y=
|
||||
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/go-chi/chi v4.0.0+incompatible h1:SiLLEDyAkqNnw+T/uDTf3aFB9T4FTrwMpuYrgaRcnW4=
|
||||
github.com/go-chi/chi v4.0.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v0.0.0-20180605202552-5ed622c449da h1:b5fma7aUP2fn6+tdKKCJ0TxXYzY/5wDiqUxNdyi5VF4=
|
||||
github.com/gorilla/websocket v0.0.0-20180605202552-5ed622c449da/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailgun/mailgun-go/v4 v4.3.0 h1:9nAF7LI3k6bfDPbMZQMMl63Q8/vs+dr1FUN8eR1XMhk=
|
||||
github.com/mailgun/mailgun-go/v4 v4.3.0/go.mod h1:fWuBI2iaS/pSSyo6+EBpHjatQO3lV8onwqcRy7joSJI=
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20180511142126-bb74f1db0675 h1:/rdJjIiKG5rRdwG5yxHmSE/7ZREjpyC0kL7GxGT/qJw=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20180511142126-bb74f1db0675/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
|
||||
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/sendgrid/rest v0.0.0-20180905234047-875828e14d98 h1:wpBZ5DAYLNl+2v4E4WP8k/y8tM5OjIf1FezJS1qX8sU=
|
||||
github.com/sendgrid/rest v0.0.0-20180905234047-875828e14d98/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV6tsOE70KbHoqJls4lE=
|
||||
github.com/sendgrid/sendgrid-go v0.0.0-20180905233524-8cb43f4ca4f5 h1:V18LU+jSbihmDiWfLSzs9FV1d3KVB1gRTkNxgVHmcvg=
|
||||
github.com/sendgrid/sendgrid-go v0.0.0-20180905233524-8cb43f4ca4f5/go.mod h1:QRQt+LX/NmgVEvmdRw0VT/QgUn499+iza2FnDca9fg8=
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
|
||||
github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
|
||||
github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
|
||||
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
|
||||
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
|
||||
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As=
|
||||
github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
golang.org/x/crypto v0.0.0-20171231215028-0fcca4842a8d h1:GrqEEc3+MtHKTsZrdIGVoYDgLpbSRzW1EF+nLu0PcHE=
|
||||
golang.org/x/crypto v0.0.0-20171231215028-0fcca4842a8d/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/mysql v1.6.0 h1:eNbLmNTpPpTOVZi8MMxCi2aaIm0ZpInbORNXDwyLGvg=
gorm.io/driver/mysql v1.6.0/go.mod h1:D/oCC2GWK3M/dqoLxnOlaNKmXz8WNTfcS9y5ovaSqKo=
gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ=
gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8=
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
166
justfile
Normal file
@@ -0,0 +1,166 @@
# OpenAccounting Server - Just recipes
# https://github.com/casey/just

# Default recipe
default:
    @just --list

# Variables
image_name := "openaccounting-server"
tag := "latest"

# Build the Go application
build:
    @echo "Building OpenAccounting Server..."
    go build -o server ./core/

# Run the server locally
run: build
    @echo "Starting server locally..."
    ./server

# Run with custom environment
run-dev: build
    @echo "Starting server in development mode..."
    OA_DATABASE_DRIVER=sqlite OA_DATABASE_FILE=./dev.db OA_PORT=8080 ./server

# Run tests
test:
    @echo "Running tests..."
    go test ./...

# Clean build artifacts
clean:
    @echo "Cleaning up..."
    rm -f server
    rm -f *.db

# Docker recipes

# Build Docker image
docker-build:
    @echo "Building Docker image: {{image_name}}:{{tag}}"
    docker build -t {{image_name}}:{{tag}} .

# Run container with SQLite (development)
docker-run: docker-build
    @echo "Running container with SQLite..."
    docker run --rm -p 8080:8080 \
        -e OA_DATABASE_DRIVER=sqlite \
        -e OA_DATABASE_FILE=/app/data/openaccounting.db \
        -v $(pwd)/data:/app/data \
        {{image_name}}:{{tag}}

# Run container with MySQL (production example)
docker-run-mysql: docker-build
    @echo "Running container with MySQL (requires external MySQL)..."
    docker run --rm -p 8080:8080 \
        -e OA_DATABASE_DRIVER=mysql \
        -e OA_DATABASE_ADDRESS=mysql:3306 \
        -e OA_DATABASE=openaccounting \
        -e OA_USER=openaccounting \
        -e OA_PASSWORD=secret \
        {{image_name}}:{{tag}}

# Run with docker-compose (if you create one)
docker-compose-up:
    @echo "Starting with docker-compose..."
    docker-compose up -d

docker-compose-down:
    @echo "Stopping docker-compose..."
    docker-compose down

# Development utilities

# Format code
fmt:
    @echo "Formatting code..."
    go fmt ./...

# Lint code (requires golangci-lint)
lint:
    @echo "Linting code..."
    golangci-lint run

# Install development dependencies
install-deps:
    @echo "Installing development dependencies..."
    go mod download
    go mod vendor

# Update dependencies
update-deps:
    @echo "Updating dependencies..."
    go get -u ./...
    go mod tidy
    go mod vendor

# Database utilities

# Create SQLite database directory
init-db:
    @echo "Creating database directory..."
    mkdir -p data

# Reset SQLite database
reset-db:
    @echo "Resetting SQLite database..."
    rm -f *.db data/*.db

# Migration recipes

# Run database migrations manually (if needed)
migrate:
    @echo "Running database migrations..."
    go run ./core/ --migrate-only || echo "Migration command not implemented yet"

# Production utilities

# Build for production
build-prod:
    @echo "Building for production..."
    CGO_ENABLED=1 GOOS=linux go build -a -installsuffix cgo -ldflags="-w -s" -o server ./core/

# Create release tarball
release: build-prod
    @echo "Creating release package..."
    tar -czf openaccounting-server-$(date +%Y%m%d).tar.gz server config.json.sample README.md

# Security scan (requires trivy)
security-scan:
    @echo "Scanning Docker image for vulnerabilities..."
    trivy image {{image_name}}:{{tag}}

# Show configuration help
config-help:
    @echo "OpenAccounting Server Configuration:"
    @echo ""
    @echo "Environment Variables (prefix with OA_):"
    @echo "  OA_ADDRESS             Server address (default: localhost)"
    @echo "  OA_PORT                Server port (default: 8080)"
    @echo "  OA_API_PREFIX          API prefix (default: /api/v1)"
    @echo "  OA_DATABASE_DRIVER     Database driver: sqlite or mysql (default: sqlite)"
    @echo "  OA_DATABASE_FILE       SQLite database file (default: ./openaccounting.db)"
    @echo "  OA_DATABASE_ADDRESS    MySQL address (e.g., localhost:3306)"
    @echo "  OA_DATABASE            MySQL database name"
    @echo "  OA_USER                Database username"
    @echo "  OA_PASSWORD            Database password (recommended for security)"
    @echo "  OA_MAILGUN_DOMAIN      Mailgun domain"
    @echo "  OA_MAILGUN_KEY         Mailgun API key (recommended for security)"
    @echo "  OA_MAILGUN_EMAIL       Mailgun email"
    @echo "  OA_MAILGUN_SENDER     Mailgun sender name"
    @echo ""
    @echo "Examples:"
    @echo "  Development: OA_DATABASE_DRIVER=sqlite OA_PORT=8080 ./server"
    @echo "  Production:  OA_DATABASE_DRIVER=mysql OA_PASSWORD=secret ./server"

# All-in-one development setup
dev-setup: install-deps init-db build
    @echo "Development setup complete!"
    @echo "Run 'just run-dev' to start the server"

# All-in-one production build
prod-build: clean build-prod docker-build
    @echo "Production build complete!"
    @echo "Run 'just docker-run' to test the container"
@@ -1,105 +0,0 @@
package main

import (
	"encoding/json"
	"github.com/openaccounting/oa-server/core/model/db"
	"github.com/openaccounting/oa-server/core/model/types"
	"log"
	"os"
)

func main() {
	if len(os.Args) != 2 {
		log.Fatal("Usage: migrate1.go <upgrade/downgrade>")
	}

	command := os.Args[1]

	if command != "upgrade" && command != "downgrade" {
		log.Fatal("Usage: migrate1.go <upgrade/downgrade>")
	}

	//filename is the path to the json config file
	var config types.Config
	file, err := os.Open("./config.json")

	if err != nil {
		log.Fatal(err)
	}

	decoder := json.NewDecoder(file)
	err = decoder.Decode(&config)

	if err != nil {
		log.Fatal(err)
	}

	connectionString := config.User + ":" + config.Password + "@/" + config.Database
	db, err := db.NewDB(connectionString)

	if command == "upgrade" {
		err = upgrade(db)
	} else {
		err = downgrade(db)
	}

	if err != nil {
		log.Fatal(err)
	}

	log.Println("done")
}

func upgrade(db *db.DB) (err error) {
	tx, err := db.Begin()

	if err != nil {
		return
	}

	defer func() {
		if p := recover(); p != nil {
			tx.Rollback()
			panic(p) // re-throw panic after Rollback
		} else if err != nil {
			tx.Rollback()
		} else {
			err = tx.Commit()
		}
	}()

	query1 := "ALTER TABLE user ADD COLUMN signupSource VARCHAR(100) NOT NULL AFTER emailVerifyCode"

	if _, err = tx.Exec(query1); err != nil {
		return
	}

	return
}

func downgrade(db *db.DB) (err error) {
	tx, err := db.Begin()

	if err != nil {
		return
	}

	defer func() {
		if p := recover(); p != nil {
			tx.Rollback()
			panic(p) // re-throw panic after Rollback
		} else if err != nil {
			tx.Rollback()
		} else {
			err = tx.Commit()
		}
	}()

	query1 := "ALTER TABLE user DROP COLUMN signupSource"

	if _, err = tx.Exec(query1); err != nil {
		return
	}

	return
}
@@ -1,105 +0,0 @@
package main

import (
	"encoding/json"
	"github.com/openaccounting/oa-server/core/model/db"
	"github.com/openaccounting/oa-server/core/model/types"
	"log"
	"os"
)

func main() {
	if len(os.Args) != 2 {
		log.Fatal("Usage: migrate2.go <upgrade/downgrade>")
	}

	command := os.Args[1]

	if command != "upgrade" && command != "downgrade" {
		log.Fatal("Usage: migrate2.go <upgrade/downgrade>")
	}

	//filename is the path to the json config file
	var config types.Config
	file, err := os.Open("./config.json")

	if err != nil {
		log.Fatal(err)
	}

	decoder := json.NewDecoder(file)
	err = decoder.Decode(&config)

	if err != nil {
		log.Fatal(err)
	}

	connectionString := config.User + ":" + config.Password + "@/" + config.Database
	db, err := db.NewDB(connectionString)

	if command == "upgrade" {
		err = upgrade(db)
	} else {
		err = downgrade(db)
	}

	if err != nil {
		log.Fatal(err)
	}

	log.Println("done")
}

func upgrade(db *db.DB) (err error) {
	tx, err := db.Begin()

	if err != nil {
		return
	}

	defer func() {
		if p := recover(); p != nil {
			tx.Rollback()
			panic(p) // re-throw panic after Rollback
		} else if err != nil {
			tx.Rollback()
		} else {
			err = tx.Commit()
		}
	}()

	query1 := "ALTER TABLE org ADD COLUMN timezone VARCHAR(100) NOT NULL AFTER `precision`"

	if _, err = tx.Exec(query1); err != nil {
		return
	}

	return
}

func downgrade(db *db.DB) (err error) {
	tx, err := db.Begin()

	if err != nil {
		return
	}

	defer func() {
		if p := recover(); p != nil {
			tx.Rollback()
			panic(p) // re-throw panic after Rollback
		} else if err != nil {
			tx.Rollback()
		} else {
			err = tx.Commit()
		}
	}()

	query1 := "ALTER TABLE org DROP COLUMN timezone"

	if _, err = tx.Exec(query1); err != nil {
		return
	}

	return
}
@@ -1,105 +0,0 @@
package main

import (
	"encoding/json"
	"github.com/openaccounting/oa-server/core/model/db"
	"github.com/openaccounting/oa-server/core/model/types"
	"log"
	"os"
)

func main() {
	if len(os.Args) != 2 {
		log.Fatal("Usage: migrate3.go <upgrade/downgrade>")
	}

	command := os.Args[1]

	if command != "upgrade" && command != "downgrade" {
		log.Fatal("Usage: migrate3.go <upgrade/downgrade>")
	}

	//filename is the path to the json config file
	var config types.Config
	file, err := os.Open("./config.json")

	if err != nil {
		log.Fatal(err)
	}

	decoder := json.NewDecoder(file)
	err = decoder.Decode(&config)

	if err != nil {
		log.Fatal(err)
	}

	connectionString := config.User + ":" + config.Password + "@/" + config.Database
	db, err := db.NewDB(connectionString)

	if command == "upgrade" {
		err = upgrade(db)
	} else {
		err = downgrade(db)
	}

	if err != nil {
		log.Fatal(err)
	}

	log.Println("done")
}

func upgrade(db *db.DB) (err error) {
	tx, err := db.Begin()

	if err != nil {
		return
	}

	defer func() {
		if p := recover(); p != nil {
			tx.Rollback()
			panic(p) // re-throw panic after Rollback
		} else if err != nil {
			tx.Rollback()
		} else {
			err = tx.Commit()
		}
	}()

	query1 := "CREATE TABLE budgetitem (id INT UNSIGNED NOT NULL AUTO_INCREMENT, orgId BINARY(16) NOT NULL, accountId BINARY(16) NOT NULL, inserted BIGINT UNSIGNED NOT NULL, amount BIGINT NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB;"

	if _, err = tx.Exec(query1); err != nil {
		return
	}

	return
}

func downgrade(db *db.DB) (err error) {
	tx, err := db.Begin()

	if err != nil {
		return
	}

	defer func() {
		if p := recover(); p != nil {
			tx.Rollback()
			panic(p) // re-throw panic after Rollback
		} else if err != nil {
			tx.Rollback()
		} else {
			err = tx.Commit()
		}
	}()

	query1 := "DROP TABLE budgetitem"

	if _, err = tx.Exec(query1); err != nil {
		return
	}

	return
}
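These three standalone migration programs are removed on this branch; with GORM in place, the same schema changes can be expressed against the new structs under `models/`. Below is a minimal sketch of that replacement flow, assuming the module path `github.com/openaccounting/oa-server` and the SQLite driver listed in go.sum; the actual startup wiring in `./core/` may differ.

```go
package main

import (
	"log"

	"github.com/openaccounting/oa-server/models"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

func main() {
	// Open the development SQLite database (path is an example).
	db, err := gorm.Open(sqlite.Open("./dev.db"), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// AutoMigrate creates missing tables and adds missing columns so the
	// schema matches the model structs, covering what migrate1-3 did with
	// hand-written DDL (user.signupSource, org.timezone, budgetitem).
	if err := db.AutoMigrate(
		&models.User{},
		&models.Org{},
		&models.BudgetItem{},
	); err != nil {
		log.Fatal(err)
	}
}
```

Note that AutoMigrate is additive: destructive steps such as the old `downgrade` paths (dropping columns or tables) would still need an explicit migration.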
18
models/account.go
Normal file
@@ -0,0 +1,18 @@
package models

// Account represents a financial account
type Account struct {
	ID           []byte `gorm:"type:BINARY(16);primaryKey"`
	OrgID        []byte `gorm:"column:orgId;type:BINARY(16);not null"`
	Inserted     uint64 `gorm:"column:inserted;not null"`
	Updated      uint64 `gorm:"column:updated;not null"`
	Name         string `gorm:"column:name;size:100;not null"`
	Parent       []byte `gorm:"column:parent;type:BINARY(16);not null"`
	Currency     string `gorm:"column:currency;size:10;not null"`
	Precision    int    `gorm:"column:precision;not null"`
	DebitBalance bool   `gorm:"column:debitBalance;not null"`

	Org      Org       `gorm:"foreignKey:OrgID"`
	Splits   []Split   `gorm:"foreignKey:AccountID"`
	Balances []Balance `gorm:"foreignKey:AccountID"`
}
13
models/api_key.go
Normal file
@@ -0,0 +1,13 @@
package models

// APIKey represents API keys for users
type APIKey struct {
	ID       []byte `gorm:"type:BINARY(16);primaryKey"`
	Inserted uint64 `gorm:"column:inserted;not null"`
	Updated  uint64 `gorm:"column:updated;not null"`
	UserID   []byte `gorm:"column:userId;type:BINARY(16);not null"`
	Label    string `gorm:"column:label;size:300;not null"`
	Deleted  uint64 `gorm:"column:deleted"`

	User User `gorm:"foreignKey:UserID"`
}
11
models/balance.go
Normal file
@@ -0,0 +1,11 @@
package models

// Balance represents an account balance at a point in time
type Balance struct {
	ID        uint   `gorm:"primaryKey;autoIncrement"`
	Date      uint64 `gorm:"column:date;not null"`
	AccountID []byte `gorm:"column:accountId;type:BINARY(16);not null"`
	Amount    int64  `gorm:"column:amount;not null"`

	Account Account `gorm:"foreignKey:AccountID"`
}
45
models/base.go
Normal file
@@ -0,0 +1,45 @@
package models

import (
	"github.com/google/uuid"
	"github.com/openaccounting/oa-server/core/util/id"
	"gorm.io/gorm"
)

type Base struct {
	ID []byte `gorm:"type:BINARY(16);primaryKey"`
}

// GetUUID converts binary ID to UUID
func (b *Base) GetUUID() (uuid.UUID, error) {
	return id.ToUUID(b.ID)
}

// GetIDString returns string representation of the ID
func (b *Base) GetIDString() string {
	return id.String(b.ID)
}

// SetIDFromString parses string UUID into binary ID
func (b *Base) SetIDFromString(s string) error {
	binID, err := id.FromString(s)
	if err != nil {
		return err
	}
	b.ID = binID
	return nil
}

// ValidateID checks if the ID is a valid UUID
func (b *Base) ValidateID() error {
	_, err := uuid.FromBytes(b.ID)
	return err
}

// BeforeCreate GORM hook to set ID if empty
func (b *Base) BeforeCreate(tx *gorm.DB) error {
	if len(b.ID) == 0 {
		b.ID = id.New()
	}
	return nil
}
13
models/budget_item.go
Normal file
@@ -0,0 +1,13 @@
package models

// BudgetItem represents budget items
type BudgetItem struct {
	ID        uint   `gorm:"primaryKey;autoIncrement"`
	OrgID     []byte `gorm:"column:orgId;type:BINARY(16);not null"`
	AccountID []byte `gorm:"column:accountId;type:BINARY(16);not null"`
	Inserted  uint64 `gorm:"column:inserted;not null"`
	Amount    int64  `gorm:"column:amount;not null"`

	Org     Org     `gorm:"foreignKey:OrgID"`
	Account Account `gorm:"foreignKey:AccountID"`
}
13
models/invite.go
Normal file
@@ -0,0 +1,13 @@
package models

// Invite represents organization invitations
type Invite struct {
	ID       string `gorm:"size:32;primaryKey"`
	OrgID    []byte `gorm:"column:orgId;type:BINARY(16);not null"`
	Inserted uint64 `gorm:"column:inserted;not null"`
	Updated  uint64 `gorm:"column:updated;not null"`
	Email    string `gorm:"column:email;size:100;not null"`
	Accepted bool   `gorm:"column:accepted;not null"`

	Org Org `gorm:"foreignKey:OrgID"`
}
15
models/org.go
Normal file
@@ -0,0 +1,15 @@
package models

// Org represents an organization
type Org struct {
	ID        []byte `gorm:"type:BINARY(16);primaryKey"`
	Inserted  uint64 `gorm:"column:inserted;not null"`
	Updated   uint64 `gorm:"column:updated;not null"`
	Name      string `gorm:"column:name;size:100;not null"`
	Currency  string `gorm:"column:currency;size:10;not null"`
	Precision int    `gorm:"column:precision;not null"`
	Timezone  string `gorm:"column:timezone;size:100;not null"`

	Accounts []Account `gorm:"foreignKey:OrgID"`
	UserOrgs []UserOrg `gorm:"foreignKey:OrgID"`
}
18
models/permission.go
Normal file
@@ -0,0 +1,18 @@
package models

// Permission represents access control rules
type Permission struct {
	ID        []byte `gorm:"type:BINARY(16);primaryKey"`
	UserID    []byte `gorm:"column:userId;type:BINARY(16)"`
	TokenID   []byte `gorm:"column:tokenId;type:BINARY(16)"`
	OrgID     []byte `gorm:"column:orgId;type:BINARY(16);not null"`
	AccountID []byte `gorm:"column:accountId;type:BINARY(16);not null"`
	Type      uint   `gorm:"column:type;not null"`
	Inserted  uint64 `gorm:"column:inserted;not null"`
	Updated   uint64 `gorm:"column:updated;not null"`

	User    User    `gorm:"foreignKey:UserID"`
	Token   Token   `gorm:"foreignKey:TokenID"`
	Org     Org     `gorm:"foreignKey:OrgID"`
	Account Account `gorm:"foreignKey:AccountID"`
}
14
models/price.go
Normal file
@@ -0,0 +1,14 @@
package models

// Price represents currency exchange rates
type Price struct {
	ID       []byte  `gorm:"type:BINARY(16);primaryKey"`
	OrgID    []byte  `gorm:"column:orgId;type:BINARY(16);not null"`
	Currency string  `gorm:"column:currency;size:10;not null"`
	Date     uint64  `gorm:"column:date;not null"`
	Inserted uint64  `gorm:"column:inserted;not null"`
	Updated  uint64  `gorm:"column:updated;not null"`
	Price    float64 `gorm:"column:price;not null"`

	Org Org `gorm:"foreignKey:OrgID"`
}
12
models/session.go
Normal file
@@ -0,0 +1,12 @@
package models

// Session represents user sessions
type Session struct {
	ID         []byte `gorm:"type:BINARY(16);primaryKey"`
	Inserted   uint64 `gorm:"column:inserted;not null"`
	Updated    uint64 `gorm:"column:updated;not null"`
	UserID     []byte `gorm:"column:userId;type:BINARY(16);not null"`
	Terminated uint64 `gorm:"column:terminated"`

	User User `gorm:"foreignKey:UserID"`
}
17
models/split.go
Normal file
@@ -0,0 +1,17 @@
package models

// Split represents a single entry in a transaction
type Split struct {
	ID            uint   `gorm:"primaryKey;autoIncrement"`
	TransactionID []byte `gorm:"column:transactionId;type:BINARY(16);not null"`
	AccountID     []byte `gorm:"column:accountId;type:BINARY(16);not null"`
	Date          uint64 `gorm:"column:date;not null"`
	Inserted      uint64 `gorm:"column:inserted;not null"`
	Updated       uint64 `gorm:"column:updated;not null"`
	Amount        int64  `gorm:"column:amount;not null"`
	NativeAmount  int64  `gorm:"column:nativeAmount;not null"`
	Deleted       bool   `gorm:"column:deleted;default:false"`

	Transaction Transaction `gorm:"foreignKey:TransactionID"`
	Account     Account     `gorm:"foreignKey:AccountID"`
}
10
models/token.go
Normal file
@@ -0,0 +1,10 @@
package models

// Token represents an API token
type Token struct {
	ID        []byte `gorm:"type:BINARY(16);primaryKey"`
	Name      string `gorm:"column:name;size:100"`
	UserOrgID uint   `gorm:"column:userOrgId;not null"`

	UserOrg UserOrg `gorm:"foreignKey:UserOrgID"`
}
18
models/transaction.go
Normal file
@@ -0,0 +1,18 @@
package models

// Transaction represents a financial transaction
type Transaction struct {
	ID          []byte `gorm:"type:BINARY(16);primaryKey"`
	OrgID       []byte `gorm:"column:orgId;type:BINARY(16);not null"`
	UserID      []byte `gorm:"column:userId;type:BINARY(16);not null"`
	Date        uint64 `gorm:"column:date;not null"`
	Inserted    uint64 `gorm:"column:inserted;not null"`
	Updated     uint64 `gorm:"column:updated;not null"`
	Description string `gorm:"column:description;size:300;not null"`
	Data        string `gorm:"column:data;type:TEXT;not null"`
	Deleted     bool   `gorm:"column:deleted;default:false"`

	Org    Org     `gorm:"foreignKey:OrgID"`
	User   User    `gorm:"foreignKey:UserID"`
	Splits []Split `gorm:"foreignKey:TransactionID"`
}
21
models/user.go
Normal file
@@ -0,0 +1,21 @@
package models

// User represents a user account
type User struct {
	ID              []byte `gorm:"type:BINARY(16);primaryKey"`
	Inserted        uint64 `gorm:"column:inserted;not null"`
	Updated         uint64 `gorm:"column:updated;not null"`
	FirstName       string `gorm:"column:firstName;size:50;not null"`
	LastName        string `gorm:"column:lastName;size:50;not null"`
	Email           string `gorm:"column:email;size:100;not null;unique"`
	PasswordHash    string `gorm:"column:passwordHash;size:100;not null"`
	AgreeToTerms    bool   `gorm:"column:agreeToTerms;not null"`
	PasswordReset   string `gorm:"column:passwordReset;size:32;not null"`
	EmailVerified   bool   `gorm:"column:emailVerified;not null"`
	EmailVerifyCode string `gorm:"column:emailVerifyCode;size:32;not null"`
	SignupSource    string `gorm:"column:signupSource;size:100;not null"`

	UserOrgs []UserOrg `gorm:"foreignKey:UserID"`
	Sessions []Session `gorm:"foreignKey:UserID"`
	APIKeys  []APIKey  `gorm:"foreignKey:UserID"`
}
12
models/user_org.go
Normal file
@@ -0,0 +1,12 @@
package models

// UserOrg represents the relationship between users and organizations
type UserOrg struct {
	ID     uint   `gorm:"primaryKey;autoIncrement"`
	UserID []byte `gorm:"column:userId;type:BINARY(16);not null"`
	OrgID  []byte `gorm:"column:orgId;type:BINARY(16);not null"`
	Admin  bool   `gorm:"column:admin;default:false"`

	User User `gorm:"foreignKey:UserID"`
	Org  Org  `gorm:"foreignKey:OrgID"`
}
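The `foreignKey` tags on these structs let related rows be loaded in a single call. A hedged sketch of how the new models might be queried, again assuming the `github.com/openaccounting/oa-server/models` import path and a local SQLite database; the server's real query layer may differ.

```go
package main

import (
	"log"

	"github.com/openaccounting/oa-server/models"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

func main() {
	db, err := gorm.Open(sqlite.Open("./dev.db"), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Load recent non-deleted transactions for an org together with their
	// splits and the accounts those splits touch. String column names
	// ("orgId", "deleted", "date") follow the column tags above.
	var orgID []byte // 16-byte binary org ID obtained elsewhere
	var txs []models.Transaction
	err = db.
		Preload("Splits").
		Preload("Splits.Account").
		Where("orgId = ? AND deleted = ?", orgID, false).
		Order("date DESC").
		Limit(20).
		Find(&txs).Error
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d transactions", len(txs))
}
```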
27
vendor/filippo.io/edwards25519/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14
vendor/filippo.io/edwards25519/README.md
generated
vendored
Normal file
@@ -0,0 +1,14 @@
# filippo.io/edwards25519

```
import "filippo.io/edwards25519"
```

This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives.
Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519).

The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality.

Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative.

Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements.
20
vendor/filippo.io/edwards25519/doc.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package edwards25519 implements group logic for the twisted Edwards curve
//
//	-x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
//
// This is better known as the Edwards curve equivalent to Curve25519, and is
// the curve used by the Ed25519 signature scheme.
//
// Most users don't need this package, and should instead use crypto/ed25519 for
// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
// github.com/gtank/ristretto255 for prime order group logic.
//
// However, developers who do need to interact with low-level edwards25519
// operations can use this package, which is an extended version of
// crypto/internal/edwards25519 from the standard library repackaged as
// an importable module.
package edwards25519
427
vendor/filippo.io/edwards25519/edwards25519.go
generated
vendored
Normal file
427
vendor/filippo.io/edwards25519/edwards25519.go
generated
vendored
Normal file
@@ -0,0 +1,427 @@
|
||||
// Copyright (c) 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package edwards25519
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"filippo.io/edwards25519/field"
|
||||
)
|
||||
|
||||
// Point types.
|
||||
|
||||
type projP1xP1 struct {
|
||||
X, Y, Z, T field.Element
|
||||
}
|
||||
|
||||
type projP2 struct {
|
||||
X, Y, Z field.Element
|
||||
}
|
||||
|
||||
// Point represents a point on the edwards25519 curve.
|
||||
//
|
||||
// This type works similarly to math/big.Int, and all arguments and receivers
|
||||
// are allowed to alias.
|
||||
//
|
||||
// The zero value is NOT valid, and it may be used only as a receiver.
|
||||
type Point struct {
|
||||
// Make the type not comparable (i.e. used with == or as a map key), as
|
||||
// equivalent points can be represented by different Go values.
|
||||
_ incomparable
|
||||
|
||||
// The point is internally represented in extended coordinates (X, Y, Z, T)
|
||||
// where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
|
||||
x, y, z, t field.Element
|
||||
}
|
||||
|
||||
type incomparable [0]func()
|
||||
|
||||
func checkInitialized(points ...*Point) {
|
||||
for _, p := range points {
|
||||
if p.x == (field.Element{}) && p.y == (field.Element{}) {
|
||||
panic("edwards25519: use of uninitialized Point")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type projCached struct {
|
||||
YplusX, YminusX, Z, T2d field.Element
|
||||
}
|
||||
|
||||
type affineCached struct {
|
||||
YplusX, YminusX, T2d field.Element
|
||||
}
|
||||
|
||||
// Constructors.
|
||||
|
||||
func (v *projP2) Zero() *projP2 {
|
||||
v.X.Zero()
|
||||
v.Y.One()
|
||||
v.Z.One()
|
||||
return v
|
||||
}
|
||||
|
||||
// identity is the point at infinity.
|
||||
var identity, _ = new(Point).SetBytes([]byte{
|
||||
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
|
||||
|
||||
// NewIdentityPoint returns a new Point set to the identity.
|
||||
func NewIdentityPoint() *Point {
|
||||
return new(Point).Set(identity)
|
||||
}
|
||||
|
||||
// generator is the canonical curve basepoint. See TestGenerator for the
|
||||
// correspondence of this encoding with the values in RFC 8032.
|
||||
var generator, _ = new(Point).SetBytes([]byte{
|
||||
0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
|
||||
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
|
||||
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
|
||||
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
|
||||
|
||||
// NewGeneratorPoint returns a new Point set to the canonical generator.
|
||||
func NewGeneratorPoint() *Point {
|
||||
return new(Point).Set(generator)
|
||||
}
|
||||
|
||||
func (v *projCached) Zero() *projCached {
|
||||
v.YplusX.One()
|
||||
v.YminusX.One()
|
||||
v.Z.One()
|
||||
v.T2d.Zero()
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *affineCached) Zero() *affineCached {
|
||||
v.YplusX.One()
|
||||
v.YminusX.One()
|
||||
v.T2d.Zero()
|
||||
return v
|
||||
}
|
||||
|
||||
// Assignments.
|
||||
|
||||
// Set sets v = u, and returns v.
|
||||
func (v *Point) Set(u *Point) *Point {
|
||||
*v = *u
|
||||
return v
|
||||
}
|
||||
|
||||
// Encoding.
|
||||
|
||||
// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
|
||||
// Section 5.1.2.
|
||||
func (v *Point) Bytes() []byte {
|
||||
// This function is outlined to make the allocations inline in the caller
|
||||
// rather than happen on the heap.
|
||||
var buf [32]byte
|
||||
return v.bytes(&buf)
|
||||
}
|
||||
|
||||
func (v *Point) bytes(buf *[32]byte) []byte {
|
||||
checkInitialized(v)
|
||||
|
||||
var zInv, x, y field.Element
|
||||
zInv.Invert(&v.z) // zInv = 1 / Z
|
||||
x.Multiply(&v.x, &zInv) // x = X / Z
|
||||
y.Multiply(&v.y, &zInv) // y = Y / Z
|
||||
|
||||
out := copyFieldElement(buf, &y)
|
||||
out[31] |= byte(x.IsNegative() << 7)
|
||||
return out
|
||||
}
|
||||
|
||||
var feOne = new(field.Element).One()
|
||||
|
||||
// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
|
||||
// represent a valid point on the curve, SetBytes returns nil and an error and
|
||||
// the receiver is unchanged. Otherwise, SetBytes returns v.
|
||||
//
|
||||
// Note that SetBytes accepts all non-canonical encodings of valid points.
|
||||
// That is, it follows decoding rules that match most implementations in
|
||||
// the ecosystem rather than RFC 8032.
|
||||
func (v *Point) SetBytes(x []byte) (*Point, error) {
|
||||
// Specifically, the non-canonical encodings that are accepted are
|
||||
// 1) the ones where the field element is not reduced (see the
|
||||
// (*field.Element).SetBytes docs) and
|
||||
// 2) the ones where the x-coordinate is zero and the sign bit is set.
|
||||
//
|
||||
// Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
|
||||
// specifically the "Canonical A, R" section.
|
||||
|
||||
y, err := new(field.Element).SetBytes(x)
|
||||
if err != nil {
|
||||
return nil, errors.New("edwards25519: invalid point encoding length")
|
||||
}
|
||||
|
||||
// -x² + y² = 1 + dx²y²
|
||||
// x² + dx²y² = x²(dy² + 1) = y² - 1
|
||||
// x² = (y² - 1) / (dy² + 1)
|
||||
|
||||
// u = y² - 1
|
||||
y2 := new(field.Element).Square(y)
|
||||
u := new(field.Element).Subtract(y2, feOne)
|
||||
|
||||
// v = dy² + 1
|
||||
vv := new(field.Element).Multiply(y2, d)
|
||||
vv = vv.Add(vv, feOne)
|
||||
|
||||
// x = +√(u/v)
|
||||
xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
|
||||
if wasSquare == 0 {
|
||||
return nil, errors.New("edwards25519: invalid point encoding")
|
||||
}
|
||||
|
||||
// Select the negative square root if the sign bit is set.
|
||||
xxNeg := new(field.Element).Negate(xx)
|
||||
xx = xx.Select(xxNeg, xx, int(x[31]>>7))
|
||||
|
||||
v.x.Set(xx)
|
||||
v.y.Set(y)
|
||||
v.z.One()
|
||||
v.t.Multiply(xx, y) // xy = T / Z
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
|
||||
copy(buf[:], v.Bytes())
|
||||
return buf[:]
|
||||
}
|
||||
|
||||
// Conversions.
|
||||
|
||||
func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
|
||||
v.X.Multiply(&p.X, &p.T)
|
||||
v.Y.Multiply(&p.Y, &p.Z)
|
||||
v.Z.Multiply(&p.Z, &p.T)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *projP2) FromP3(p *Point) *projP2 {
|
||||
v.X.Set(&p.x)
|
||||
v.Y.Set(&p.y)
|
||||
v.Z.Set(&p.z)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *Point) fromP1xP1(p *projP1xP1) *Point {
|
||||
v.x.Multiply(&p.X, &p.T)
|
||||
v.y.Multiply(&p.Y, &p.Z)
|
||||
v.z.Multiply(&p.Z, &p.T)
|
||||
v.t.Multiply(&p.X, &p.Y)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *Point) fromP2(p *projP2) *Point {
|
||||
v.x.Multiply(&p.X, &p.Z)
|
||||
v.y.Multiply(&p.Y, &p.Z)
|
||||
v.z.Square(&p.Z)
|
||||
v.t.Multiply(&p.X, &p.Y)
|
||||
return v
|
||||
}
|
||||
|
||||
// d is a constant in the curve equation.
|
||||
var d, _ = new(field.Element).SetBytes([]byte{
|
||||
0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
|
||||
0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
|
||||
0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
|
||||
0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
|
||||
var d2 = new(field.Element).Add(d, d)
|
||||
|
||||
func (v *projCached) FromP3(p *Point) *projCached {
|
||||
v.YplusX.Add(&p.y, &p.x)
|
||||
v.YminusX.Subtract(&p.y, &p.x)
|
||||
v.Z.Set(&p.z)
|
||||
v.T2d.Multiply(&p.t, d2)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *affineCached) FromP3(p *Point) *affineCached {
|
||||
v.YplusX.Add(&p.y, &p.x)
|
||||
v.YminusX.Subtract(&p.y, &p.x)
|
||||
v.T2d.Multiply(&p.t, d2)
|
||||
|
||||
var invZ field.Element
|
||||
invZ.Invert(&p.z)
|
||||
v.YplusX.Multiply(&v.YplusX, &invZ)
|
||||
v.YminusX.Multiply(&v.YminusX, &invZ)
|
||||
v.T2d.Multiply(&v.T2d, &invZ)
|
||||
return v
|
||||
}
|
||||
|
||||
// (Re)addition and subtraction.
|
||||
|
||||
// Add sets v = p + q, and returns v.
|
||||
func (v *Point) Add(p, q *Point) *Point {
|
||||
checkInitialized(p, q)
|
||||
qCached := new(projCached).FromP3(q)
|
||||
result := new(projP1xP1).Add(p, qCached)
|
||||
return v.fromP1xP1(result)
|
||||
}
|
||||
|
||||
// Subtract sets v = p - q, and returns v.
|
||||
func (v *Point) Subtract(p, q *Point) *Point {
|
||||
checkInitialized(p, q)
|
||||
qCached := new(projCached).FromP3(q)
|
||||
result := new(projP1xP1).Sub(p, qCached)
|
||||
return v.fromP1xP1(result)
|
||||
}
|
||||
|
||||
func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
|
||||
var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
|
||||
|
||||
YplusX.Add(&p.y, &p.x)
|
||||
YminusX.Subtract(&p.y, &p.x)
|
||||
|
||||
PP.Multiply(&YplusX, &q.YplusX)
|
||||
MM.Multiply(&YminusX, &q.YminusX)
|
||||
TT2d.Multiply(&p.t, &q.T2d)
|
||||
ZZ2.Multiply(&p.z, &q.Z)
|
||||
|
||||
ZZ2.Add(&ZZ2, &ZZ2)
|
||||
|
||||
v.X.Subtract(&PP, &MM)
|
||||
v.Y.Add(&PP, &MM)
|
||||
v.Z.Add(&ZZ2, &TT2d)
|
||||
v.T.Subtract(&ZZ2, &TT2d)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
|
||||
var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
|
||||
|
||||
YplusX.Add(&p.y, &p.x)
|
||||
YminusX.Subtract(&p.y, &p.x)
|
||||
|
||||
PP.Multiply(&YplusX, &q.YminusX) // flipped sign
|
||||
MM.Multiply(&YminusX, &q.YplusX) // flipped sign
|
||||
TT2d.Multiply(&p.t, &q.T2d)
|
||||
ZZ2.Multiply(&p.z, &q.Z)
|
||||
|
||||
ZZ2.Add(&ZZ2, &ZZ2)
|
||||
|
||||
v.X.Subtract(&PP, &MM)
|
||||
v.Y.Add(&PP, &MM)
|
||||
v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
|
||||
v.T.Add(&ZZ2, &TT2d) // flipped sign
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
|
||||
var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
|
||||
|
||||
YplusX.Add(&p.y, &p.x)
|
||||
YminusX.Subtract(&p.y, &p.x)
|
||||
|
||||
PP.Multiply(&YplusX, &q.YplusX)
|
||||
MM.Multiply(&YminusX, &q.YminusX)
|
||||
TT2d.Multiply(&p.t, &q.T2d)
|
||||
|
||||
Z2.Add(&p.z, &p.z)
|
||||
|
||||
v.X.Subtract(&PP, &MM)
|
||||
v.Y.Add(&PP, &MM)
|
||||
v.Z.Add(&Z2, &TT2d)
|
||||
v.T.Subtract(&Z2, &TT2d)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
|
||||
var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
|
||||
|
||||
YplusX.Add(&p.y, &p.x)
|
||||
YminusX.Subtract(&p.y, &p.x)
|
||||
|
||||
PP.Multiply(&YplusX, &q.YminusX) // flipped sign
|
||||
MM.Multiply(&YminusX, &q.YplusX) // flipped sign
|
||||
TT2d.Multiply(&p.t, &q.T2d)
|
||||
|
||||
Z2.Add(&p.z, &p.z)
|
||||
|
||||
v.X.Subtract(&PP, &MM)
|
||||
v.Y.Add(&PP, &MM)
|
||||
v.Z.Subtract(&Z2, &TT2d) // flipped sign
|
||||
v.T.Add(&Z2, &TT2d) // flipped sign
|
||||
return v
|
||||
}
|
||||
|
||||
// Doubling.
|
||||
|
||||
func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
|
||||
var XX, YY, ZZ2, XplusYsq field.Element
|
||||
|
||||
XX.Square(&p.X)
|
||||
YY.Square(&p.Y)
|
||||
ZZ2.Square(&p.Z)
|
||||
ZZ2.Add(&ZZ2, &ZZ2)
|
||||
XplusYsq.Add(&p.X, &p.Y)
|
||||
XplusYsq.Square(&XplusYsq)
|
||||
|
||||
v.Y.Add(&YY, &XX)
|
||||
v.Z.Subtract(&YY, &XX)
|
||||
|
||||
v.X.Subtract(&XplusYsq, &v.Y)
|
||||
v.T.Subtract(&ZZ2, &v.Z)
|
||||
return v
|
||||
}
|
||||
|
||||
// Negation.
|
||||
|
||||
// Negate sets v = -p, and returns v.
|
||||
func (v *Point) Negate(p *Point) *Point {
|
||||
checkInitialized(p)
|
||||
v.x.Negate(&p.x)
|
||||
v.y.Set(&p.y)
|
||||
v.z.Set(&p.z)
|
||||
v.t.Negate(&p.t)
|
||||
return v
|
||||
}
|
||||
|
||||
// Equal returns 1 if v is equivalent to u, and 0 otherwise.
|
||||
func (v *Point) Equal(u *Point) int {
|
||||
checkInitialized(v, u)
|
||||
|
||||
var t1, t2, t3, t4 field.Element
|
||||
t1.Multiply(&v.x, &u.z)
|
||||
t2.Multiply(&u.x, &v.z)
|
||||
t3.Multiply(&v.y, &u.z)
|
||||
t4.Multiply(&u.y, &v.z)
|
||||
|
||||
return t1.Equal(&t2) & t3.Equal(&t4)
|
||||
}
|
||||
|
||||
// Constant-time operations
|
||||
|
||||
// Select sets v to a if cond == 1 and to b if cond == 0.
|
||||
func (v *projCached) Select(a, b *projCached, cond int) *projCached {
|
||||
v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
|
||||
v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
|
||||
v.Z.Select(&a.Z, &b.Z, cond)
|
||||
v.T2d.Select(&a.T2d, &b.T2d, cond)
|
||||
return v
|
||||
}
|
||||
|
||||
// Select sets v to a if cond == 1 and to b if cond == 0.
|
||||
func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
|
||||
v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
|
||||
v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
|
||||
v.T2d.Select(&a.T2d, &b.T2d, cond)
|
||||
return v
|
||||
}
|
||||
|
||||
// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
|
||||
func (v *projCached) CondNeg(cond int) *projCached {
|
||||
v.YplusX.Swap(&v.YminusX, cond)
|
||||
v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
|
||||
return v
|
||||
}
|
||||
|
||||
// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
|
||||
func (v *affineCached) CondNeg(cond int) *affineCached {
|
||||
v.YplusX.Swap(&v.YminusX, cond)
|
||||
v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
|
||||
return v
|
||||
}
|
||||
349
vendor/filippo.io/edwards25519/extra.go
generated
vendored
Normal file
349
vendor/filippo.io/edwards25519/extra.go
generated
vendored
Normal file
@@ -0,0 +1,349 @@
|
||||
// Copyright (c) 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package edwards25519
|
||||
|
||||
// This file contains additional functionality that is not included in the
|
||||
// upstream crypto/internal/edwards25519 package.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"filippo.io/edwards25519/field"
|
||||
)
|
||||
|
||||
// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where
|
||||
// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
|
||||
func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) {
|
||||
// This function is outlined to make the allocations inline in the caller
|
||||
// rather than happen on the heap. Don't change the style without making
|
||||
// sure it doesn't increase the inliner cost.
|
||||
var e [4]field.Element
|
||||
X, Y, Z, T = v.extendedCoordinates(&e)
|
||||
return
|
||||
}
|
||||
|
||||
func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) {
|
||||
checkInitialized(v)
|
||||
X = e[0].Set(&v.x)
|
||||
Y = e[1].Set(&v.y)
|
||||
Z = e[2].Set(&v.z)
|
||||
T = e[3].Set(&v.t)
|
||||
return
|
||||
}
|
||||
|
||||
// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where
|
||||
// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
|
||||
//
|
||||
// If the coordinates are invalid or don't represent a valid point on the curve,
|
||||
// SetExtendedCoordinates returns nil and an error and the receiver is
|
||||
// unchanged. Otherwise, SetExtendedCoordinates returns v.
|
||||
func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) {
|
||||
if !isOnCurve(X, Y, Z, T) {
|
||||
return nil, errors.New("edwards25519: invalid point coordinates")
|
||||
}
|
||||
v.x.Set(X)
|
||||
v.y.Set(Y)
|
||||
v.z.Set(Z)
|
||||
v.t.Set(T)
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func isOnCurve(X, Y, Z, T *field.Element) bool {
|
||||
var lhs, rhs field.Element
|
||||
XX := new(field.Element).Square(X)
|
||||
YY := new(field.Element).Square(Y)
|
||||
ZZ := new(field.Element).Square(Z)
|
||||
TT := new(field.Element).Square(T)
|
||||
// -x² + y² = 1 + dx²y²
|
||||
// -(X/Z)² + (Y/Z)² = 1 + d(T/Z)²
|
||||
// -X² + Y² = Z² + dT²
|
||||
lhs.Subtract(YY, XX)
|
||||
rhs.Multiply(d, TT).Add(&rhs, ZZ)
|
||||
if lhs.Equal(&rhs) != 1 {
|
||||
return false
|
||||
}
|
||||
// xy = T/Z
|
||||
// XY/Z² = T/Z
|
||||
// XY = TZ
|
||||
lhs.Multiply(X, Y)
|
||||
rhs.Multiply(T, Z)
|
||||
return lhs.Equal(&rhs) == 1
|
||||
}
|
||||
|
||||
// BytesMontgomery converts v to a point on the birationally-equivalent
|
||||
// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding
|
||||
// according to RFC 7748.
|
||||
//
|
||||
// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode
|
||||
// to the same value. If v is the identity point, BytesMontgomery returns 32
|
||||
// zero bytes, analogously to the X25519 function.
|
||||
//
|
||||
// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate:
|
||||
// while every valid edwards25519 point has a unique u-coordinate Montgomery
|
||||
// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond
|
||||
// to any edwards25519 point, and every other X25519 input corresponds to two
|
||||
// edwards25519 points.
|
||||
func (v *Point) BytesMontgomery() []byte {
|
||||
// This function is outlined to make the allocations inline in the caller
|
||||
// rather than happen on the heap.
|
||||
var buf [32]byte
|
||||
return v.bytesMontgomery(&buf)
|
||||
}
|
||||
|
||||
func (v *Point) bytesMontgomery(buf *[32]byte) []byte {
|
||||
checkInitialized(v)
|
||||
|
||||
// RFC 7748, Section 4.1 provides the bilinear map to calculate the
|
||||
// Montgomery u-coordinate
|
||||
//
|
||||
// u = (1 + y) / (1 - y)
|
||||
//
|
||||
// where y = Y / Z.
|
||||
|
||||
var y, recip, u field.Element
|
||||
|
||||
y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z
|
||||
recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y)
|
||||
u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r
|
||||
|
||||
return copyFieldElement(buf, &u)
|
||||
}
|
||||
|
||||
// MultByCofactor sets v = 8 * p, and returns v.
|
||||
func (v *Point) MultByCofactor(p *Point) *Point {
|
||||
checkInitialized(p)
|
||||
result := projP1xP1{}
|
||||
pp := (&projP2{}).FromP3(p)
|
||||
result.Double(pp)
|
||||
pp.FromP1xP1(&result)
|
||||
result.Double(pp)
|
||||
pp.FromP1xP1(&result)
|
||||
result.Double(pp)
|
||||
return v.fromP1xP1(&result)
|
||||
}
|
||||
|
||||
// Given k > 0, set s = s**(2*i).
|
||||
func (s *Scalar) pow2k(k int) {
|
||||
for i := 0; i < k; i++ {
|
||||
s.Multiply(s, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Invert sets s to the inverse of a nonzero scalar v, and returns s.
|
||||
//
|
||||
// If t is zero, Invert returns zero.
|
||||
func (s *Scalar) Invert(t *Scalar) *Scalar {
|
||||
// Uses a hardcoded sliding window of width 4.
|
||||
var table [8]Scalar
|
||||
var tt Scalar
|
||||
tt.Multiply(t, t)
|
||||
table[0] = *t
|
||||
for i := 0; i < 7; i++ {
|
||||
table[i+1].Multiply(&table[i], &tt)
|
||||
}
|
||||
// Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15]
|
||||
// so t**k = t[k/2] for odd k
|
||||
|
||||
// To compute the sliding window digits, use the following Sage script:
|
||||
|
||||
// sage: import itertools
|
||||
// sage: def sliding_window(w,k):
|
||||
// ....: digits = []
|
||||
// ....: while k > 0:
|
||||
// ....: if k % 2 == 1:
|
||||
// ....: kmod = k % (2**w)
|
||||
// ....: digits.append(kmod)
|
||||
// ....: k = k - kmod
|
||||
// ....: else:
|
||||
// ....: digits.append(0)
|
||||
// ....: k = k // 2
|
||||
// ....: return digits
|
||||
|
||||
// Now we can compute s roughly as follows:
|
||||
|
||||
// sage: s = 1
|
||||
// sage: for coeff in reversed(sliding_window(4,l-2)):
|
||||
// ....: s = s*s
|
||||
// ....: if coeff > 0 :
|
||||
// ....: s = s*t**coeff
|
||||
|
||||
// This works on one bit at a time, with many runs of zeros.
|
||||
// The digits can be collapsed into [(count, coeff)] as follows:
|
||||
|
||||
// sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))]
|
||||
|
||||
// Entries of the form (k, 0) turn into pow2k(k)
|
||||
// Entries of the form (1, coeff) turn into a squaring and then a table lookup.
|
||||
// We can fold the squaring into the previous pow2k(k) as pow2k(k+1).
|
||||
|
||||
*s = table[1/2]
|
||||
s.pow2k(127 + 1)
|
||||
s.Multiply(s, &table[1/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[9/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[11/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[13/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[7/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[5/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[1/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[7/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[11/2])
|
||||
s.pow2k(5 + 1)
|
||||
s.Multiply(s, &table[11/2])
|
||||
s.pow2k(9 + 1)
|
||||
s.Multiply(s, &table[9/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[9/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[7/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[3/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[13/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[7/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[9/2])
|
||||
s.pow2k(3 + 1)
|
||||
s.Multiply(s, &table[15/2])
|
||||
s.pow2k(4 + 1)
|
||||
s.Multiply(s, &table[11/2])
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
|
||||
//
|
||||
// Execution time depends only on the lengths of the two slices, which must match.
|
||||
func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
|
||||
if len(scalars) != len(points) {
|
||||
panic("edwards25519: called MultiScalarMult with different size inputs")
|
||||
}
|
||||
checkInitialized(points...)
|
||||
|
||||
// Proceed as in the single-base case, but share doublings
|
||||
// between each point in the multiscalar equation.
|
||||
|
||||
// Build lookup tables for each point
|
||||
tables := make([]projLookupTable, len(points))
|
||||
for i := range tables {
|
||||
tables[i].FromP3(points[i])
|
||||
}
|
||||
// Compute signed radix-16 digits for each scalar
|
||||
digits := make([][64]int8, len(scalars))
|
||||
for i := range digits {
|
||||
digits[i] = scalars[i].signedRadix16()
|
||||
}
|
||||
|
||||
// Unwrap first loop iteration to save computing 16*identity
|
||||
multiple := &projCached{}
|
||||
tmp1 := &projP1xP1{}
|
||||
tmp2 := &projP2{}
|
||||
// Lookup-and-add the appropriate multiple of each input point
|
||||
for j := range tables {
|
||||
tables[j].SelectInto(multiple, digits[j][63])
|
||||
tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
|
||||
v.fromP1xP1(tmp1) // update v
|
||||
}
|
||||
tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
|
||||
for i := 62; i >= 0; i-- {
|
||||
tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
|
||||
tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
|
||||
tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
|
||||
tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
|
||||
v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
|
||||
// Lookup-and-add the appropriate multiple of each input point
|
||||
for j := range tables {
|
||||
tables[j].SelectInto(multiple, digits[j][i])
|
||||
tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
|
||||
v.fromP1xP1(tmp1) // update v
|
||||
}
|
||||
tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
|
||||
//
|
||||
// Execution time depends on the inputs.
|
||||
func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point {
|
||||
if len(scalars) != len(points) {
|
||||
panic("edwards25519: called VarTimeMultiScalarMult with different size inputs")
|
||||
}
|
||||
checkInitialized(points...)
|
||||
|
||||
// Generalize double-base NAF computation to arbitrary sizes.
|
||||
// Here all the points are dynamic, so we only use the smaller
|
||||
// tables.
|
||||
|
||||
// Build lookup tables for each point
|
||||
tables := make([]nafLookupTable5, len(points))
|
||||
for i := range tables {
|
||||
tables[i].FromP3(points[i])
|
||||
}
|
||||
// Compute a NAF for each scalar
|
||||
nafs := make([][256]int8, len(scalars))
|
||||
for i := range nafs {
|
||||
nafs[i] = scalars[i].nonAdjacentForm(5)
|
||||
}
|
||||
|
||||
multiple := &projCached{}
|
||||
tmp1 := &projP1xP1{}
|
||||
tmp2 := &projP2{}
|
||||
tmp2.Zero()
|
||||
|
||||
// Move from high to low bits, doubling the accumulator
|
||||
// at each iteration and checking whether there is a nonzero
|
||||
// coefficient to look up a multiple of.
|
||||
//
|
||||
// Skip trying to find the first nonzero coefficent, because
|
||||
// searching might be more work than a few extra doublings.
|
||||
for i := 255; i >= 0; i-- {
|
||||
tmp1.Double(tmp2)
|
||||
|
||||
for j := range nafs {
|
||||
if nafs[j][i] > 0 {
|
||||
v.fromP1xP1(tmp1)
|
||||
tables[j].SelectInto(multiple, nafs[j][i])
|
||||
tmp1.Add(v, multiple)
|
||||
} else if nafs[j][i] < 0 {
|
||||
v.fromP1xP1(tmp1)
|
||||
tables[j].SelectInto(multiple, -nafs[j][i])
|
||||
tmp1.Sub(v, multiple)
|
||||
}
|
||||
}
|
||||
|
||||
tmp2.FromP1xP1(tmp1)
|
||||
}
|
||||
|
||||
v.fromP2(tmp2)
|
||||
return v
|
||||
}
|
||||
420
vendor/filippo.io/edwards25519/field/fe.go
generated
vendored
Normal file
420
vendor/filippo.io/edwards25519/field/fe.go
generated
vendored
Normal file
@@ -0,0 +1,420 @@
|
||||
// Copyright (c) 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package field implements fast arithmetic modulo 2^255-19.
|
||||
package field
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// Element represents an element of the field GF(2^255-19). Note that this
|
||||
// is not a cryptographically secure group, and should only be used to interact
|
||||
// with edwards25519.Point coordinates.
|
||||
//
|
||||
// This type works similarly to math/big.Int, and all arguments and receivers
|
||||
// are allowed to alias.
|
||||
//
|
||||
// The zero value is a valid zero element.
|
||||
type Element struct {
|
||||
// An element t represents the integer
|
||||
// t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
|
||||
//
|
||||
// Between operations, all limbs are expected to be lower than 2^52.
|
||||
l0 uint64
|
||||
l1 uint64
|
||||
l2 uint64
|
||||
l3 uint64
|
||||
l4 uint64
|
||||
}
|
||||
|
||||
const maskLow51Bits uint64 = (1 << 51) - 1
|
||||
|
||||
var feZero = &Element{0, 0, 0, 0, 0}
|
||||
|
||||
// Zero sets v = 0, and returns v.
|
||||
func (v *Element) Zero() *Element {
|
||||
*v = *feZero
|
||||
return v
|
||||
}
|
||||
|
||||
var feOne = &Element{1, 0, 0, 0, 0}
|
||||
|
||||
// One sets v = 1, and returns v.
|
||||
func (v *Element) One() *Element {
|
||||
*v = *feOne
|
||||
return v
|
||||
}
|
||||
|
||||
// reduce reduces v modulo 2^255 - 19 and returns it.
|
||||
func (v *Element) reduce() *Element {
|
||||
v.carryPropagate()
|
||||
|
||||
// After the light reduction we now have a field element representation
|
||||
// v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
|
||||
|
||||
// If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
|
||||
// generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
|
||||
c := (v.l0 + 19) >> 51
|
||||
c = (v.l1 + c) >> 51
|
||||
c = (v.l2 + c) >> 51
|
||||
c = (v.l3 + c) >> 51
|
||||
c = (v.l4 + c) >> 51
|
||||
|
||||
// If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
|
||||
// effectively applying the reduction identity to the carry.
|
||||
v.l0 += 19 * c
|
||||
|
||||
v.l1 += v.l0 >> 51
|
||||
v.l0 = v.l0 & maskLow51Bits
|
||||
v.l2 += v.l1 >> 51
|
||||
v.l1 = v.l1 & maskLow51Bits
|
||||
v.l3 += v.l2 >> 51
|
||||
v.l2 = v.l2 & maskLow51Bits
|
||||
v.l4 += v.l3 >> 51
|
||||
v.l3 = v.l3 & maskLow51Bits
|
||||
// no additional carry
|
||||
v.l4 = v.l4 & maskLow51Bits
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// Add sets v = a + b, and returns v.
|
||||
func (v *Element) Add(a, b *Element) *Element {
|
||||
v.l0 = a.l0 + b.l0
|
||||
v.l1 = a.l1 + b.l1
|
||||
v.l2 = a.l2 + b.l2
|
||||
v.l3 = a.l3 + b.l3
|
||||
v.l4 = a.l4 + b.l4
|
||||
// Using the generic implementation here is actually faster than the
|
||||
// assembly. Probably because the body of this function is so simple that
|
||||
// the compiler can figure out better optimizations by inlining the carry
|
||||
// propagation.
|
||||
return v.carryPropagateGeneric()
|
||||
}
|
||||
|
||||
// Subtract sets v = a - b, and returns v.
|
||||
func (v *Element) Subtract(a, b *Element) *Element {
|
||||
// We first add 2 * p, to guarantee the subtraction won't underflow, and
|
||||
// then subtract b (which can be up to 2^255 + 2^13 * 19).
|
||||
v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
|
||||
v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
|
||||
v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
|
||||
v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
|
||||
v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
|
||||
return v.carryPropagate()
|
||||
}
|
||||
|
||||
// Negate sets v = -a, and returns v.
|
||||
func (v *Element) Negate(a *Element) *Element {
|
||||
return v.Subtract(feZero, a)
|
||||
}
|
||||
|
||||
// Invert sets v = 1/z mod p, and returns v.
|
||||
//
|
||||
// If z == 0, Invert returns v = 0.
|
||||
func (v *Element) Invert(z *Element) *Element {
|
||||
// Inversion is implemented as exponentiation with exponent p − 2. It uses the
|
||||
// same sequence of 255 squarings and 11 multiplications as [Curve25519].
|
||||
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
|
||||
|
||||
z2.Square(z) // 2
|
||||
t.Square(&z2) // 4
|
||||
t.Square(&t) // 8
|
||||
z9.Multiply(&t, z) // 9
|
||||
z11.Multiply(&z9, &z2) // 11
|
||||
t.Square(&z11) // 22
|
||||
z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
|
||||
|
||||
t.Square(&z2_5_0) // 2^6 - 2^1
|
||||
for i := 0; i < 4; i++ {
|
||||
t.Square(&t) // 2^10 - 2^5
|
||||
}
|
||||
z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
|
||||
|
||||
t.Square(&z2_10_0) // 2^11 - 2^1
|
||||
for i := 0; i < 9; i++ {
|
||||
t.Square(&t) // 2^20 - 2^10
|
||||
}
|
||||
z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
|
||||
|
||||
t.Square(&z2_20_0) // 2^21 - 2^1
|
||||
for i := 0; i < 19; i++ {
|
||||
t.Square(&t) // 2^40 - 2^20
|
||||
}
|
||||
t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
|
||||
|
||||
t.Square(&t) // 2^41 - 2^1
|
||||
for i := 0; i < 9; i++ {
|
||||
t.Square(&t) // 2^50 - 2^10
|
||||
}
|
||||
z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
|
||||
|
||||
t.Square(&z2_50_0) // 2^51 - 2^1
|
||||
for i := 0; i < 49; i++ {
|
||||
t.Square(&t) // 2^100 - 2^50
|
||||
}
|
||||
z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
|
||||
|
||||
t.Square(&z2_100_0) // 2^101 - 2^1
|
||||
for i := 0; i < 99; i++ {
|
||||
t.Square(&t) // 2^200 - 2^100
|
||||
}
|
||||
t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
|
||||
|
||||
t.Square(&t) // 2^201 - 2^1
|
||||
for i := 0; i < 49; i++ {
|
||||
t.Square(&t) // 2^250 - 2^50
|
||||
}
|
||||
t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
|
||||
|
||||
t.Square(&t) // 2^251 - 2^1
|
||||
t.Square(&t) // 2^252 - 2^2
|
||||
t.Square(&t) // 2^253 - 2^3
|
||||
t.Square(&t) // 2^254 - 2^4
|
||||
t.Square(&t) // 2^255 - 2^5
|
||||
|
||||
return v.Multiply(&t, &z11) // 2^255 - 21
|
||||
}
|
||||
|
||||
// Set sets v = a, and returns v.
|
||||
func (v *Element) Set(a *Element) *Element {
|
||||
*v = *a
|
||||
return v
|
||||
}
|
||||
|
||||
// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
|
||||
// not of the right length, SetBytes returns nil and an error, and the
|
||||
// receiver is unchanged.
|
||||
//
|
||||
// Consistent with RFC 7748, the most significant bit (the high bit of the
|
||||
// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
|
||||
// are accepted. Note that this is laxer than specified by RFC 8032, but
|
||||
// consistent with most Ed25519 implementations.
|
||||
func (v *Element) SetBytes(x []byte) (*Element, error) {
|
||||
if len(x) != 32 {
|
||||
return nil, errors.New("edwards25519: invalid field element input size")
|
||||
}
|
||||
|
||||
// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
|
||||
v.l0 = binary.LittleEndian.Uint64(x[0:8])
|
||||
v.l0 &= maskLow51Bits
|
||||
// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
|
||||
v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
|
||||
v.l1 &= maskLow51Bits
|
||||
// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
|
||||
v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
|
||||
v.l2 &= maskLow51Bits
|
||||
// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
|
||||
v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
|
||||
v.l3 &= maskLow51Bits
|
||||
// Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
|
||||
// Note: not bytes 25:33, shift 4, to avoid overread.
|
||||
v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
|
||||
v.l4 &= maskLow51Bits
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// Bytes returns the canonical 32-byte little-endian encoding of v.
|
||||
func (v *Element) Bytes() []byte {
|
||||
// This function is outlined to make the allocations inline in the caller
|
||||
// rather than happen on the heap.
|
||||
var out [32]byte
|
||||
return v.bytes(&out)
|
||||
}
|
||||
|
||||
func (v *Element) bytes(out *[32]byte) []byte {
|
||||
t := *v
|
||||
t.reduce()
|
||||
|
||||
var buf [8]byte
|
||||
for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
|
||||
bitsOffset := i * 51
|
||||
binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
|
||||
for i, bb := range buf {
|
||||
off := bitsOffset/8 + i
|
||||
if off >= len(out) {
|
||||
break
|
||||
}
|
||||
out[off] |= bb
|
||||
}
|
||||
}
|
||||
|
||||
return out[:]
|
||||
}
|
||||
|
||||
// Equal returns 1 if v and u are equal, and 0 otherwise.
|
||||
func (v *Element) Equal(u *Element) int {
|
||||
sa, sv := u.Bytes(), v.Bytes()
|
||||
return subtle.ConstantTimeCompare(sa, sv)
|
||||
}
|
||||
|
||||
// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise.
|
||||
func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
|
||||
|
||||
// Select sets v to a if cond == 1, and to b if cond == 0.
|
||||
func (v *Element) Select(a, b *Element, cond int) *Element {
|
||||
m := mask64Bits(cond)
|
||||
v.l0 = (m & a.l0) | (^m & b.l0)
|
||||
v.l1 = (m & a.l1) | (^m & b.l1)
|
||||
v.l2 = (m & a.l2) | (^m & b.l2)
|
||||
v.l3 = (m & a.l3) | (^m & b.l3)
|
||||
v.l4 = (m & a.l4) | (^m & b.l4)
|
||||
return v
|
||||
}
|
||||
|
||||
// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
|
||||
func (v *Element) Swap(u *Element, cond int) {
|
||||
m := mask64Bits(cond)
|
||||
t := m & (v.l0 ^ u.l0)
|
||||
v.l0 ^= t
|
||||
u.l0 ^= t
|
||||
t = m & (v.l1 ^ u.l1)
|
||||
v.l1 ^= t
|
||||
u.l1 ^= t
|
||||
t = m & (v.l2 ^ u.l2)
|
||||
v.l2 ^= t
|
||||
u.l2 ^= t
|
||||
t = m & (v.l3 ^ u.l3)
|
||||
v.l3 ^= t
|
||||
u.l3 ^= t
|
||||
t = m & (v.l4 ^ u.l4)
|
||||
v.l4 ^= t
|
||||
u.l4 ^= t
|
||||
}
|
||||
|
||||
// IsNegative returns 1 if v is negative, and 0 otherwise.
|
||||
func (v *Element) IsNegative() int {
|
||||
return int(v.Bytes()[0] & 1)
|
||||
}
|
||||
|
||||
// Absolute sets v to |u|, and returns v.
|
||||
func (v *Element) Absolute(u *Element) *Element {
|
||||
return v.Select(new(Element).Negate(u), u, u.IsNegative())
|
||||
}
|
||||
|
||||
// Multiply sets v = x * y, and returns v.
|
||||
func (v *Element) Multiply(x, y *Element) *Element {
|
||||
feMul(v, x, y)
|
||||
return v
|
||||
}
|
||||
|
||||
// Square sets v = x * x, and returns v.
|
||||
func (v *Element) Square(x *Element) *Element {
|
||||
feSquare(v, x)
|
||||
return v
|
||||
}
|
||||
|
||||
// Mult32 sets v = x * y, and returns v.
|
||||
func (v *Element) Mult32(x *Element, y uint32) *Element {
|
||||
x0lo, x0hi := mul51(x.l0, y)
|
||||
x1lo, x1hi := mul51(x.l1, y)
|
||||
x2lo, x2hi := mul51(x.l2, y)
|
||||
x3lo, x3hi := mul51(x.l3, y)
|
||||
x4lo, x4hi := mul51(x.l4, y)
|
||||
v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
|
||||
v.l1 = x1lo + x0hi
|
||||
v.l2 = x2lo + x1hi
|
||||
v.l3 = x3lo + x2hi
|
||||
v.l4 = x4lo + x3hi
|
||||
// The hi portions are going to be only 32 bits, plus any previous excess,
|
||||
// so we can skip the carry propagation.
|
||||
return v
|
||||
}
|
||||
|
||||
// mul51 returns lo + hi * 2⁵¹ = a * b.
|
||||
func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
|
||||
mh, ml := bits.Mul64(a, uint64(b))
|
||||
lo = ml & maskLow51Bits
|
||||
hi = (mh << 13) | (ml >> 51)
|
||||
return
|
||||
}
|
||||
|
||||
// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
|
||||
func (v *Element) Pow22523(x *Element) *Element {
|
||||
var t0, t1, t2 Element
|
||||
|
||||
t0.Square(x) // x^2
|
||||
t1.Square(&t0) // x^4
|
||||
t1.Square(&t1) // x^8
|
||||
t1.Multiply(x, &t1) // x^9
|
||||
t0.Multiply(&t0, &t1) // x^11
|
||||
t0.Square(&t0) // x^22
|
||||
t0.Multiply(&t1, &t0) // x^31
|
||||
t1.Square(&t0) // x^62
|
||||
for i := 1; i < 5; i++ { // x^992
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
|
||||
t1.Square(&t0) // 2^11 - 2
|
||||
for i := 1; i < 10; i++ { // 2^20 - 2^10
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t1.Multiply(&t1, &t0) // 2^20 - 1
|
||||
t2.Square(&t1) // 2^21 - 2
|
||||
for i := 1; i < 20; i++ { // 2^40 - 2^20
|
||||
t2.Square(&t2)
|
||||
}
|
||||
t1.Multiply(&t2, &t1) // 2^40 - 1
|
||||
t1.Square(&t1) // 2^41 - 2
|
||||
for i := 1; i < 10; i++ { // 2^50 - 2^10
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t0.Multiply(&t1, &t0) // 2^50 - 1
|
||||
t1.Square(&t0) // 2^51 - 2
|
||||
for i := 1; i < 50; i++ { // 2^100 - 2^50
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t1.Multiply(&t1, &t0) // 2^100 - 1
|
||||
t2.Square(&t1) // 2^101 - 2
|
||||
for i := 1; i < 100; i++ { // 2^200 - 2^100
|
||||
t2.Square(&t2)
|
||||
}
|
||||
t1.Multiply(&t2, &t1) // 2^200 - 1
|
||||
t1.Square(&t1) // 2^201 - 2
|
||||
for i := 1; i < 50; i++ { // 2^250 - 2^50
|
||||
t1.Square(&t1)
|
||||
}
|
||||
t0.Multiply(&t1, &t0) // 2^250 - 1
|
||||
t0.Square(&t0) // 2^251 - 2
|
||||
t0.Square(&t0) // 2^252 - 4
|
||||
return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
|
||||
}
|
||||
|
||||
// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
|
||||
var sqrtM1 = &Element{1718705420411056, 234908883556509,
|
||||
2233514472574048, 2117202627021982, 765476049583133}
|
||||
|
||||
// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
|
||||
//
|
||||
// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
|
||||
// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
|
||||
// and returns r and 0.
|
||||
func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
|
||||
t0 := new(Element)
|
||||
|
||||
// r = (u * v3) * (u * v7)^((p-5)/8)
|
||||
v2 := new(Element).Square(v)
|
||||
uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
|
||||
uv7 := new(Element).Multiply(uv3, t0.Square(v2))
|
||||
rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
|
||||
|
||||
check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
|
||||
|
||||
uNeg := new(Element).Negate(u)
|
||||
correctSignSqrt := check.Equal(u)
|
||||
flippedSignSqrt := check.Equal(uNeg)
|
||||
flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
|
||||
|
||||
rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
|
||||
// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
|
||||
rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
|
||||
|
||||
r.Absolute(rr) // Choose the nonnegative square root.
|
||||
return r, correctSignSqrt | flippedSignSqrt
|
||||
}
|
||||
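For orientation, a minimal usage sketch of the Element API defined in fe.go above, assuming the package is imported through its vendored path filippo.io/edwards25519/field; the input value 9 is an arbitrary example.

package main

import (
	"fmt"

	"filippo.io/edwards25519/field"
)

func main() {
	// Decode a field element from a 32-byte little-endian encoding.
	var buf [32]byte
	buf[0] = 9 // arbitrary small value
	x, err := new(field.Element).SetBytes(buf[:])
	if err != nil {
		panic(err)
	}

	// Square(x) and Multiply(x, x) compute the same value.
	sq := new(field.Element).Square(x)
	mul := new(field.Element).Multiply(x, x)
	fmt.Println(sq.Equal(mul) == 1) // true

	// Invert(x) * x reduces to one for nonzero x.
	inv := new(field.Element).Invert(x)
	prod := new(field.Element).Multiply(inv, x)
	fmt.Println(prod.Equal(new(field.Element).One()) == 1) // true

	// Bytes always returns the canonical 32-byte encoding of the reduced value.
	fmt.Printf("%x\n", sq.Bytes())
}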
16 vendor/filippo.io/edwards25519/field/fe_amd64.go (generated, vendored, new file)
@@ -0,0 +1,16 @@
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.

//go:build amd64 && gc && !purego
// +build amd64,gc,!purego

package field

// feMul sets out = a * b. It works like feMulGeneric.
//
//go:noescape
func feMul(out *Element, a *Element, b *Element)

// feSquare sets out = a * a. It works like feSquareGeneric.
//
//go:noescape
func feSquare(out *Element, a *Element)
379 vendor/filippo.io/edwards25519/field/fe_amd64.s (generated, vendored, new file)
@@ -0,0 +1,379 @@
|
||||
// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
|
||||
|
||||
//go:build amd64 && gc && !purego
|
||||
// +build amd64,gc,!purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// func feMul(out *Element, a *Element, b *Element)
|
||||
TEXT ·feMul(SB), NOSPLIT, $0-24
|
||||
MOVQ a+8(FP), CX
|
||||
MOVQ b+16(FP), BX
|
||||
|
||||
// r0 = a0×b0
|
||||
MOVQ (CX), AX
|
||||
MULQ (BX)
|
||||
MOVQ AX, DI
|
||||
MOVQ DX, SI
|
||||
|
||||
// r0 += 19×a1×b4
|
||||
MOVQ 8(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(BX)
|
||||
ADDQ AX, DI
|
||||
ADCQ DX, SI
|
||||
|
||||
// r0 += 19×a2×b3
|
||||
MOVQ 16(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 24(BX)
|
||||
ADDQ AX, DI
|
||||
ADCQ DX, SI
|
||||
|
||||
// r0 += 19×a3×b2
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 16(BX)
|
||||
ADDQ AX, DI
|
||||
ADCQ DX, SI
|
||||
|
||||
// r0 += 19×a4×b1
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 8(BX)
|
||||
ADDQ AX, DI
|
||||
ADCQ DX, SI
|
||||
|
||||
// r1 = a0×b1
|
||||
MOVQ (CX), AX
|
||||
MULQ 8(BX)
|
||||
MOVQ AX, R9
|
||||
MOVQ DX, R8
|
||||
|
||||
// r1 += a1×b0
|
||||
MOVQ 8(CX), AX
|
||||
MULQ (BX)
|
||||
ADDQ AX, R9
|
||||
ADCQ DX, R8
|
||||
|
||||
// r1 += 19×a2×b4
|
||||
MOVQ 16(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(BX)
|
||||
ADDQ AX, R9
|
||||
ADCQ DX, R8
|
||||
|
||||
// r1 += 19×a3×b3
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 24(BX)
|
||||
ADDQ AX, R9
|
||||
ADCQ DX, R8
|
||||
|
||||
// r1 += 19×a4×b2
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 16(BX)
|
||||
ADDQ AX, R9
|
||||
ADCQ DX, R8
|
||||
|
||||
// r2 = a0×b2
|
||||
MOVQ (CX), AX
|
||||
MULQ 16(BX)
|
||||
MOVQ AX, R11
|
||||
MOVQ DX, R10
|
||||
|
||||
// r2 += a1×b1
|
||||
MOVQ 8(CX), AX
|
||||
MULQ 8(BX)
|
||||
ADDQ AX, R11
|
||||
ADCQ DX, R10
|
||||
|
||||
// r2 += a2×b0
|
||||
MOVQ 16(CX), AX
|
||||
MULQ (BX)
|
||||
ADDQ AX, R11
|
||||
ADCQ DX, R10
|
||||
|
||||
// r2 += 19×a3×b4
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(BX)
|
||||
ADDQ AX, R11
|
||||
ADCQ DX, R10
|
||||
|
||||
// r2 += 19×a4×b3
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 24(BX)
|
||||
ADDQ AX, R11
|
||||
ADCQ DX, R10
|
||||
|
||||
// r3 = a0×b3
|
||||
MOVQ (CX), AX
|
||||
MULQ 24(BX)
|
||||
MOVQ AX, R13
|
||||
MOVQ DX, R12
|
||||
|
||||
// r3 += a1×b2
|
||||
MOVQ 8(CX), AX
|
||||
MULQ 16(BX)
|
||||
ADDQ AX, R13
|
||||
ADCQ DX, R12
|
||||
|
||||
// r3 += a2×b1
|
||||
MOVQ 16(CX), AX
|
||||
MULQ 8(BX)
|
||||
ADDQ AX, R13
|
||||
ADCQ DX, R12
|
||||
|
||||
// r3 += a3×b0
|
||||
MOVQ 24(CX), AX
|
||||
MULQ (BX)
|
||||
ADDQ AX, R13
|
||||
ADCQ DX, R12
|
||||
|
||||
// r3 += 19×a4×b4
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(BX)
|
||||
ADDQ AX, R13
|
||||
ADCQ DX, R12
|
||||
|
||||
// r4 = a0×b4
|
||||
MOVQ (CX), AX
|
||||
MULQ 32(BX)
|
||||
MOVQ AX, R15
|
||||
MOVQ DX, R14
|
||||
|
||||
// r4 += a1×b3
|
||||
MOVQ 8(CX), AX
|
||||
MULQ 24(BX)
|
||||
ADDQ AX, R15
|
||||
ADCQ DX, R14
|
||||
|
||||
// r4 += a2×b2
|
||||
MOVQ 16(CX), AX
|
||||
MULQ 16(BX)
|
||||
ADDQ AX, R15
|
||||
ADCQ DX, R14
|
||||
|
||||
// r4 += a3×b1
|
||||
MOVQ 24(CX), AX
|
||||
MULQ 8(BX)
|
||||
ADDQ AX, R15
|
||||
ADCQ DX, R14
|
||||
|
||||
// r4 += a4×b0
|
||||
MOVQ 32(CX), AX
|
||||
MULQ (BX)
|
||||
ADDQ AX, R15
|
||||
ADCQ DX, R14
|
||||
|
||||
// First reduction chain
|
||||
MOVQ $0x0007ffffffffffff, AX
|
||||
SHLQ $0x0d, DI, SI
|
||||
SHLQ $0x0d, R9, R8
|
||||
SHLQ $0x0d, R11, R10
|
||||
SHLQ $0x0d, R13, R12
|
||||
SHLQ $0x0d, R15, R14
|
||||
ANDQ AX, DI
|
||||
IMUL3Q $0x13, R14, R14
|
||||
ADDQ R14, DI
|
||||
ANDQ AX, R9
|
||||
ADDQ SI, R9
|
||||
ANDQ AX, R11
|
||||
ADDQ R8, R11
|
||||
ANDQ AX, R13
|
||||
ADDQ R10, R13
|
||||
ANDQ AX, R15
|
||||
ADDQ R12, R15
|
||||
|
||||
// Second reduction chain (carryPropagate)
|
||||
MOVQ DI, SI
|
||||
SHRQ $0x33, SI
|
||||
MOVQ R9, R8
|
||||
SHRQ $0x33, R8
|
||||
MOVQ R11, R10
|
||||
SHRQ $0x33, R10
|
||||
MOVQ R13, R12
|
||||
SHRQ $0x33, R12
|
||||
MOVQ R15, R14
|
||||
SHRQ $0x33, R14
|
||||
ANDQ AX, DI
|
||||
IMUL3Q $0x13, R14, R14
|
||||
ADDQ R14, DI
|
||||
ANDQ AX, R9
|
||||
ADDQ SI, R9
|
||||
ANDQ AX, R11
|
||||
ADDQ R8, R11
|
||||
ANDQ AX, R13
|
||||
ADDQ R10, R13
|
||||
ANDQ AX, R15
|
||||
ADDQ R12, R15
|
||||
|
||||
// Store output
|
||||
MOVQ out+0(FP), AX
|
||||
MOVQ DI, (AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R11, 16(AX)
|
||||
MOVQ R13, 24(AX)
|
||||
MOVQ R15, 32(AX)
|
||||
RET
|
||||
|
||||
// func feSquare(out *Element, a *Element)
|
||||
TEXT ·feSquare(SB), NOSPLIT, $0-16
|
||||
MOVQ a+8(FP), CX
|
||||
|
||||
// r0 = l0×l0
|
||||
MOVQ (CX), AX
|
||||
MULQ (CX)
|
||||
MOVQ AX, SI
|
||||
MOVQ DX, BX
|
||||
|
||||
// r0 += 38×l1×l4
|
||||
MOVQ 8(CX), AX
|
||||
IMUL3Q $0x26, AX, AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX, SI
|
||||
ADCQ DX, BX
|
||||
|
||||
// r0 += 38×l2×l3
|
||||
MOVQ 16(CX), AX
|
||||
IMUL3Q $0x26, AX, AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX, SI
|
||||
ADCQ DX, BX
|
||||
|
||||
// r1 = 2×l0×l1
|
||||
MOVQ (CX), AX
|
||||
SHLQ $0x01, AX
|
||||
MULQ 8(CX)
|
||||
MOVQ AX, R8
|
||||
MOVQ DX, DI
|
||||
|
||||
// r1 += 38×l2×l4
|
||||
MOVQ 16(CX), AX
|
||||
IMUL3Q $0x26, AX, AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX, R8
|
||||
ADCQ DX, DI
|
||||
|
||||
// r1 += 19×l3×l3
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX, R8
|
||||
ADCQ DX, DI
|
||||
|
||||
// r2 = 2×l0×l2
|
||||
MOVQ (CX), AX
|
||||
SHLQ $0x01, AX
|
||||
MULQ 16(CX)
|
||||
MOVQ AX, R10
|
||||
MOVQ DX, R9
|
||||
|
||||
// r2 += l1×l1
|
||||
MOVQ 8(CX), AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX, R10
|
||||
ADCQ DX, R9
|
||||
|
||||
// r2 += 38×l3×l4
|
||||
MOVQ 24(CX), AX
|
||||
IMUL3Q $0x26, AX, AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX, R10
|
||||
ADCQ DX, R9
|
||||
|
||||
// r3 = 2×l0×l3
|
||||
MOVQ (CX), AX
|
||||
SHLQ $0x01, AX
|
||||
MULQ 24(CX)
|
||||
MOVQ AX, R12
|
||||
MOVQ DX, R11
|
||||
|
||||
// r3 += 2×l1×l2
|
||||
MOVQ 8(CX), AX
|
||||
IMUL3Q $0x02, AX, AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX, R12
|
||||
ADCQ DX, R11
|
||||
|
||||
// r3 += 19×l4×l4
|
||||
MOVQ 32(CX), AX
|
||||
IMUL3Q $0x13, AX, AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX, R12
|
||||
ADCQ DX, R11
|
||||
|
||||
// r4 = 2×l0×l4
|
||||
MOVQ (CX), AX
|
||||
SHLQ $0x01, AX
|
||||
MULQ 32(CX)
|
||||
MOVQ AX, R14
|
||||
MOVQ DX, R13
|
||||
|
||||
// r4 += 2×l1×l3
|
||||
MOVQ 8(CX), AX
|
||||
IMUL3Q $0x02, AX, AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX, R14
|
||||
ADCQ DX, R13
|
||||
|
||||
// r4 += l2×l2
|
||||
MOVQ 16(CX), AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX, R14
|
||||
ADCQ DX, R13
|
||||
|
||||
// First reduction chain
|
||||
MOVQ $0x0007ffffffffffff, AX
|
||||
SHLQ $0x0d, SI, BX
|
||||
SHLQ $0x0d, R8, DI
|
||||
SHLQ $0x0d, R10, R9
|
||||
SHLQ $0x0d, R12, R11
|
||||
SHLQ $0x0d, R14, R13
|
||||
ANDQ AX, SI
|
||||
IMUL3Q $0x13, R13, R13
|
||||
ADDQ R13, SI
|
||||
ANDQ AX, R8
|
||||
ADDQ BX, R8
|
||||
ANDQ AX, R10
|
||||
ADDQ DI, R10
|
||||
ANDQ AX, R12
|
||||
ADDQ R9, R12
|
||||
ANDQ AX, R14
|
||||
ADDQ R11, R14
|
||||
|
||||
// Second reduction chain (carryPropagate)
|
||||
MOVQ SI, BX
|
||||
SHRQ $0x33, BX
|
||||
MOVQ R8, DI
|
||||
SHRQ $0x33, DI
|
||||
MOVQ R10, R9
|
||||
SHRQ $0x33, R9
|
||||
MOVQ R12, R11
|
||||
SHRQ $0x33, R11
|
||||
MOVQ R14, R13
|
||||
SHRQ $0x33, R13
|
||||
ANDQ AX, SI
|
||||
IMUL3Q $0x13, R13, R13
|
||||
ADDQ R13, SI
|
||||
ANDQ AX, R8
|
||||
ADDQ BX, R8
|
||||
ANDQ AX, R10
|
||||
ADDQ DI, R10
|
||||
ANDQ AX, R12
|
||||
ADDQ R9, R12
|
||||
ANDQ AX, R14
|
||||
ADDQ R11, R14
|
||||
|
||||
// Store output
|
||||
MOVQ out+0(FP), AX
|
||||
MOVQ SI, (AX)
|
||||
MOVQ R8, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R12, 24(AX)
|
||||
MOVQ R14, 32(AX)
|
||||
RET
|
||||
12 vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go (generated, vendored, new file)
@@ -0,0 +1,12 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !amd64 || !gc || purego
// +build !amd64 !gc purego

package field

func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }

func feSquare(v, x *Element) { feSquareGeneric(v, x) }
16 vendor/filippo.io/edwards25519/field/fe_arm64.go (generated, vendored, new file)
@@ -0,0 +1,16 @@
// Copyright (c) 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build arm64 && gc && !purego
// +build arm64,gc,!purego

package field

//go:noescape
func carryPropagate(v *Element)

func (v *Element) carryPropagate() *Element {
	carryPropagate(v)
	return v
}
42 vendor/filippo.io/edwards25519/field/fe_arm64.s (generated, vendored, new file)
@@ -0,0 +1,42 @@
// Copyright (c) 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build arm64 && gc && !purego

#include "textflag.h"

// carryPropagate works exactly like carryPropagateGeneric and uses the
// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
// avoids loading R0-R4 twice and uses LDP and STP.
//
// See https://golang.org/issues/43145 for the main compiler issue.
//
// func carryPropagate(v *Element)
TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
	MOVD v+0(FP), R20

	LDP 0(R20), (R0, R1)
	LDP 16(R20), (R2, R3)
	MOVD 32(R20), R4

	AND $0x7ffffffffffff, R0, R10
	AND $0x7ffffffffffff, R1, R11
	AND $0x7ffffffffffff, R2, R12
	AND $0x7ffffffffffff, R3, R13
	AND $0x7ffffffffffff, R4, R14

	ADD R0>>51, R11, R11
	ADD R1>>51, R12, R12
	ADD R2>>51, R13, R13
	ADD R3>>51, R14, R14
	// R4>>51 * 19 + R10 -> R10
	LSR $51, R4, R21
	MOVD $19, R22
	MADD R22, R10, R21, R10

	STP (R10, R11), 0(R20)
	STP (R12, R13), 16(R20)
	MOVD R14, 32(R20)

	RET
12 vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go (generated, vendored, new file)
@@ -0,0 +1,12 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !arm64 || !gc || purego
// +build !arm64 !gc purego

package field

func (v *Element) carryPropagate() *Element {
	return v.carryPropagateGeneric()
}
50 vendor/filippo.io/edwards25519/field/fe_extra.go (generated, vendored, new file)
@@ -0,0 +1,50 @@
// Copyright (c) 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package field

import "errors"

// This file contains additional functionality that is not included in the
// upstream crypto/ed25519/edwards25519/field package.

// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which
// is reduced modulo the field order. If x is not of the right length,
// SetWideBytes returns nil and an error, and the receiver is unchanged.
//
// SetWideBytes is not necessary to select a uniformly distributed value, and is
// only provided for compatibility: SetBytes can be used instead as the chance
// of bias is less than 2⁻²⁵⁰.
func (v *Element) SetWideBytes(x []byte) (*Element, error) {
	if len(x) != 64 {
		return nil, errors.New("edwards25519: invalid SetWideBytes input size")
	}

	// Split the 64 bytes into two elements, and extract the most significant
	// bit of each, which is ignored by SetBytes.
	lo, _ := new(Element).SetBytes(x[:32])
	loMSB := uint64(x[31] >> 7)
	hi, _ := new(Element).SetBytes(x[32:])
	hiMSB := uint64(x[63] >> 7)

	// The output we want is
	//
	//   v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹
	//
	// which applying the reduction identity comes out to
	//
	//   v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19²
	//
	// l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value
	// (loMSB * 19), a 6 bits value (hi.l0 * 2 * 19), and a 10 bits value
	// (hiMSB * 2 * 19²), so it fits in a uint64.

	v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19
	v.l1 = lo.l1 + hi.l1*2*19
	v.l2 = lo.l2 + hi.l2*2*19
	v.l3 = lo.l3 + hi.l3*2*19
	v.l4 = lo.l4 + hi.l4*2*19

	return v.carryPropagate(), nil
}
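A small sketch of the distinction documented above: SetBytes accepts exactly 32 bytes, while SetWideBytes reduces a full 64-byte value modulo the field order; for a value that already fits in 32 bytes the two agree.

package main

import (
	"fmt"

	"filippo.io/edwards25519/field"
)

func main() {
	// A 64-byte little-endian value whose high half is zero.
	var wide [64]byte
	wide[0] = 7

	x, err := new(field.Element).SetWideBytes(wide[:])
	if err != nil {
		panic(err)
	}

	// The low 32 bytes alone decode to the same small value, since 7 < p.
	y, _ := new(field.Element).SetBytes(wide[:32])
	fmt.Println(x.Equal(y) == 1) // true
}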
266 vendor/filippo.io/edwards25519/field/fe_generic.go (generated, vendored, new file)
@@ -0,0 +1,266 @@
|
||||
// Copyright (c) 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package field
|
||||
|
||||
import "math/bits"
|
||||
|
||||
// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
|
||||
// bits.Mul64 and bits.Add64 intrinsics.
|
||||
type uint128 struct {
|
||||
lo, hi uint64
|
||||
}
|
||||
|
||||
// mul64 returns a * b.
|
||||
func mul64(a, b uint64) uint128 {
|
||||
hi, lo := bits.Mul64(a, b)
|
||||
return uint128{lo, hi}
|
||||
}
|
||||
|
||||
// addMul64 returns v + a * b.
|
||||
func addMul64(v uint128, a, b uint64) uint128 {
|
||||
hi, lo := bits.Mul64(a, b)
|
||||
lo, c := bits.Add64(lo, v.lo, 0)
|
||||
hi, _ = bits.Add64(hi, v.hi, c)
|
||||
return uint128{lo, hi}
|
||||
}
|
||||
|
||||
// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
|
||||
func shiftRightBy51(a uint128) uint64 {
|
||||
return (a.hi << (64 - 51)) | (a.lo >> 51)
|
||||
}
|
||||
|
||||
func feMulGeneric(v, a, b *Element) {
|
||||
a0 := a.l0
|
||||
a1 := a.l1
|
||||
a2 := a.l2
|
||||
a3 := a.l3
|
||||
a4 := a.l4
|
||||
|
||||
b0 := b.l0
|
||||
b1 := b.l1
|
||||
b2 := b.l2
|
||||
b3 := b.l3
|
||||
b4 := b.l4
|
||||
|
||||
// Limb multiplication works like pen-and-paper columnar multiplication, but
|
||||
// with 51-bit limbs instead of digits.
|
||||
//
|
||||
// a4 a3 a2 a1 a0 x
|
||||
// b4 b3 b2 b1 b0 =
|
||||
// ------------------------
|
||||
// a4b0 a3b0 a2b0 a1b0 a0b0 +
|
||||
// a4b1 a3b1 a2b1 a1b1 a0b1 +
|
||||
// a4b2 a3b2 a2b2 a1b2 a0b2 +
|
||||
// a4b3 a3b3 a2b3 a1b3 a0b3 +
|
||||
// a4b4 a3b4 a2b4 a1b4 a0b4 =
|
||||
// ----------------------------------------------
|
||||
// r8 r7 r6 r5 r4 r3 r2 r1 r0
|
||||
//
|
||||
// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
|
||||
// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
|
||||
// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
|
||||
//
|
||||
// Reduction can be carried out simultaneously to multiplication. For
|
||||
// example, we do not compute r5: whenever the result of a multiplication
|
||||
// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
|
||||
//
|
||||
// a4b0 a3b0 a2b0 a1b0 a0b0 +
|
||||
// a3b1 a2b1 a1b1 a0b1 19×a4b1 +
|
||||
// a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
|
||||
// a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
|
||||
// a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
|
||||
// --------------------------------------
|
||||
// r4 r3 r2 r1 r0
|
||||
//
|
||||
// Finally we add up the columns into wide, overlapping limbs.
|
||||
|
||||
a1_19 := a1 * 19
|
||||
a2_19 := a2 * 19
|
||||
a3_19 := a3 * 19
|
||||
a4_19 := a4 * 19
|
||||
|
||||
// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
|
||||
r0 := mul64(a0, b0)
|
||||
r0 = addMul64(r0, a1_19, b4)
|
||||
r0 = addMul64(r0, a2_19, b3)
|
||||
r0 = addMul64(r0, a3_19, b2)
|
||||
r0 = addMul64(r0, a4_19, b1)
|
||||
|
||||
// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
|
||||
r1 := mul64(a0, b1)
|
||||
r1 = addMul64(r1, a1, b0)
|
||||
r1 = addMul64(r1, a2_19, b4)
|
||||
r1 = addMul64(r1, a3_19, b3)
|
||||
r1 = addMul64(r1, a4_19, b2)
|
||||
|
||||
// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
|
||||
r2 := mul64(a0, b2)
|
||||
r2 = addMul64(r2, a1, b1)
|
||||
r2 = addMul64(r2, a2, b0)
|
||||
r2 = addMul64(r2, a3_19, b4)
|
||||
r2 = addMul64(r2, a4_19, b3)
|
||||
|
||||
// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
|
||||
r3 := mul64(a0, b3)
|
||||
r3 = addMul64(r3, a1, b2)
|
||||
r3 = addMul64(r3, a2, b1)
|
||||
r3 = addMul64(r3, a3, b0)
|
||||
r3 = addMul64(r3, a4_19, b4)
|
||||
|
||||
// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
|
||||
r4 := mul64(a0, b4)
|
||||
r4 = addMul64(r4, a1, b3)
|
||||
r4 = addMul64(r4, a2, b2)
|
||||
r4 = addMul64(r4, a3, b1)
|
||||
r4 = addMul64(r4, a4, b0)
|
||||
|
||||
// After the multiplication, we need to reduce (carry) the five coefficients
|
||||
// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
|
||||
// to respect the Element invariant.
|
||||
//
|
||||
// Overall, the reduction works the same as carryPropagate, except with
|
||||
// wider inputs: we take the carry for each coefficient by shifting it right
|
||||
// by 51, and add it to the limb above it. The top carry is multiplied by 19
|
||||
// according to the reduction identity and added to the lowest limb.
|
||||
//
|
||||
// The largest coefficient (r0) will be at most 111 bits, which guarantees
|
||||
// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
|
||||
//
|
||||
// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
|
||||
// r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
|
||||
// r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
|
||||
// r0 < 2⁷ × 2⁵² × 2⁵²
|
||||
// r0 < 2¹¹¹
|
||||
//
|
||||
// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
|
||||
// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
|
||||
// allows us to easily apply the reduction identity.
|
||||
//
|
||||
// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
|
||||
// r4 < 5 × 2⁵² × 2⁵²
|
||||
// r4 < 2¹⁰⁷
|
||||
//
|
||||
|
||||
c0 := shiftRightBy51(r0)
|
||||
c1 := shiftRightBy51(r1)
|
||||
c2 := shiftRightBy51(r2)
|
||||
c3 := shiftRightBy51(r3)
|
||||
c4 := shiftRightBy51(r4)
|
||||
|
||||
rr0 := r0.lo&maskLow51Bits + c4*19
|
||||
rr1 := r1.lo&maskLow51Bits + c0
|
||||
rr2 := r2.lo&maskLow51Bits + c1
|
||||
rr3 := r3.lo&maskLow51Bits + c2
|
||||
rr4 := r4.lo&maskLow51Bits + c3
|
||||
|
||||
// Now all coefficients fit into 64-bit registers but are still too large to
|
||||
// be passed around as an Element. We therefore do one last carry chain,
|
||||
// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
|
||||
*v = Element{rr0, rr1, rr2, rr3, rr4}
|
||||
v.carryPropagate()
|
||||
}
|
||||
|
||||
func feSquareGeneric(v, a *Element) {
|
||||
l0 := a.l0
|
||||
l1 := a.l1
|
||||
l2 := a.l2
|
||||
l3 := a.l3
|
||||
l4 := a.l4
|
||||
|
||||
// Squaring works precisely like multiplication above, but thanks to its
|
||||
// symmetry we get to group a few terms together.
|
||||
//
|
||||
// l4 l3 l2 l1 l0 x
|
||||
// l4 l3 l2 l1 l0 =
|
||||
// ------------------------
|
||||
// l4l0 l3l0 l2l0 l1l0 l0l0 +
|
||||
// l4l1 l3l1 l2l1 l1l1 l0l1 +
|
||||
// l4l2 l3l2 l2l2 l1l2 l0l2 +
|
||||
// l4l3 l3l3 l2l3 l1l3 l0l3 +
|
||||
// l4l4 l3l4 l2l4 l1l4 l0l4 =
|
||||
// ----------------------------------------------
|
||||
// r8 r7 r6 r5 r4 r3 r2 r1 r0
|
||||
//
|
||||
// l4l0 l3l0 l2l0 l1l0 l0l0 +
|
||||
// l3l1 l2l1 l1l1 l0l1 19×l4l1 +
|
||||
// l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
|
||||
// l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
|
||||
// l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
|
||||
// --------------------------------------
|
||||
// r4 r3 r2 r1 r0
|
||||
//
|
||||
// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
|
||||
// only three Mul64 and four Add64, instead of five and eight.
|
||||
|
||||
l0_2 := l0 * 2
|
||||
l1_2 := l1 * 2
|
||||
|
||||
l1_38 := l1 * 38
|
||||
l2_38 := l2 * 38
|
||||
l3_38 := l3 * 38
|
||||
|
||||
l3_19 := l3 * 19
|
||||
l4_19 := l4 * 19
|
||||
|
||||
// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
|
||||
r0 := mul64(l0, l0)
|
||||
r0 = addMul64(r0, l1_38, l4)
|
||||
r0 = addMul64(r0, l2_38, l3)
|
||||
|
||||
// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
|
||||
r1 := mul64(l0_2, l1)
|
||||
r1 = addMul64(r1, l2_38, l4)
|
||||
r1 = addMul64(r1, l3_19, l3)
|
||||
|
||||
// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
|
||||
r2 := mul64(l0_2, l2)
|
||||
r2 = addMul64(r2, l1, l1)
|
||||
r2 = addMul64(r2, l3_38, l4)
|
||||
|
||||
// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
|
||||
r3 := mul64(l0_2, l3)
|
||||
r3 = addMul64(r3, l1_2, l2)
|
||||
r3 = addMul64(r3, l4_19, l4)
|
||||
|
||||
// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
|
||||
r4 := mul64(l0_2, l4)
|
||||
r4 = addMul64(r4, l1_2, l3)
|
||||
r4 = addMul64(r4, l2, l2)
|
||||
|
||||
c0 := shiftRightBy51(r0)
|
||||
c1 := shiftRightBy51(r1)
|
||||
c2 := shiftRightBy51(r2)
|
||||
c3 := shiftRightBy51(r3)
|
||||
c4 := shiftRightBy51(r4)
|
||||
|
||||
rr0 := r0.lo&maskLow51Bits + c4*19
|
||||
rr1 := r1.lo&maskLow51Bits + c0
|
||||
rr2 := r2.lo&maskLow51Bits + c1
|
||||
rr3 := r3.lo&maskLow51Bits + c2
|
||||
rr4 := r4.lo&maskLow51Bits + c3
|
||||
|
||||
*v = Element{rr0, rr1, rr2, rr3, rr4}
|
||||
v.carryPropagate()
|
||||
}
|
||||
|
||||
// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
|
||||
// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
|
||||
func (v *Element) carryPropagateGeneric() *Element {
|
||||
c0 := v.l0 >> 51
|
||||
c1 := v.l1 >> 51
|
||||
c2 := v.l2 >> 51
|
||||
c3 := v.l3 >> 51
|
||||
c4 := v.l4 >> 51
|
||||
|
||||
// c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
|
||||
// the final l0 will be at most 52 bits. Similarly for the rest.
|
||||
v.l0 = v.l0&maskLow51Bits + c4*19
|
||||
v.l1 = v.l1&maskLow51Bits + c0
|
||||
v.l2 = v.l2&maskLow51Bits + c1
|
||||
v.l3 = v.l3&maskLow51Bits + c2
|
||||
v.l4 = v.l4&maskLow51Bits + c3
|
||||
|
||||
return v
|
||||
}
|
||||
343 vendor/filippo.io/edwards25519/scalar.go (generated, vendored, new file)
@@ -0,0 +1,343 @@
|
||||
// Copyright (c) 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package edwards25519
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// A Scalar is an integer modulo
|
||||
//
|
||||
// l = 2^252 + 27742317777372353535851937790883648493
|
||||
//
|
||||
// which is the prime order of the edwards25519 group.
|
||||
//
|
||||
// This type works similarly to math/big.Int, and all arguments and
|
||||
// receivers are allowed to alias.
|
||||
//
|
||||
// The zero value is a valid zero element.
|
||||
type Scalar struct {
|
||||
// s is the scalar in the Montgomery domain, in the format of the
|
||||
// fiat-crypto implementation.
|
||||
s fiatScalarMontgomeryDomainFieldElement
|
||||
}
|
||||
|
||||
// The field implementation in scalar_fiat.go is generated by the fiat-crypto
|
||||
// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
|
||||
// from a formally verified model.
|
||||
//
|
||||
// fiat-crypto code comes under the following license.
|
||||
//
|
||||
// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
|
||||
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
|
||||
// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
|
||||
// NewScalar returns a new zero Scalar.
|
||||
func NewScalar() *Scalar {
|
||||
return &Scalar{}
|
||||
}
|
||||
|
||||
// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
|
||||
// using Multiply and then Add.
|
||||
func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
|
||||
// Make a copy of z in case it aliases s.
|
||||
zCopy := new(Scalar).Set(z)
|
||||
return s.Multiply(x, y).Add(s, zCopy)
|
||||
}
|
||||
|
||||
// Add sets s = x + y mod l, and returns s.
|
||||
func (s *Scalar) Add(x, y *Scalar) *Scalar {
|
||||
// s = 1 * x + y mod l
|
||||
fiatScalarAdd(&s.s, &x.s, &y.s)
|
||||
return s
|
||||
}
|
||||
|
||||
// Subtract sets s = x - y mod l, and returns s.
|
||||
func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
|
||||
// s = -1 * y + x mod l
|
||||
fiatScalarSub(&s.s, &x.s, &y.s)
|
||||
return s
|
||||
}
|
||||
|
||||
// Negate sets s = -x mod l, and returns s.
|
||||
func (s *Scalar) Negate(x *Scalar) *Scalar {
|
||||
// s = -1 * x + 0 mod l
|
||||
fiatScalarOpp(&s.s, &x.s)
|
||||
return s
|
||||
}
|
||||
|
||||
// Multiply sets s = x * y mod l, and returns s.
|
||||
func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
|
||||
// s = x * y + 0 mod l
|
||||
fiatScalarMul(&s.s, &x.s, &y.s)
|
||||
return s
|
||||
}
|
||||
|
||||
// Set sets s = x, and returns s.
|
||||
func (s *Scalar) Set(x *Scalar) *Scalar {
|
||||
*s = *x
|
||||
return s
|
||||
}
|
||||
|
||||
// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
|
||||
// If x is not of the right length, SetUniformBytes returns nil and an error,
|
||||
// and the receiver is unchanged.
|
||||
//
|
||||
// SetUniformBytes can be used to set s to a uniformly distributed value given
|
||||
// 64 uniformly distributed random bytes.
|
||||
func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
|
||||
if len(x) != 64 {
|
||||
return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
|
||||
}
|
||||
|
||||
// We have a value x of 512 bits, but our fiatScalarFromBytes function
|
||||
// expects an input lower than l, which is a little over 252 bits.
|
||||
//
|
||||
// Instead of writing a reduction function that operates on wider inputs, we
|
||||
// can interpret x as the sum of three shorter values a, b, and c.
|
||||
//
|
||||
// x = a + b * 2^168 + c * 2^336 mod l
|
||||
//
|
||||
// We then precompute 2^168 and 2^336 modulo l, and perform the reduction
|
||||
// with two multiplications and two additions.
|
||||
|
||||
s.setShortBytes(x[:21])
|
||||
t := new(Scalar).setShortBytes(x[21:42])
|
||||
s.Add(s, t.Multiply(t, scalarTwo168))
|
||||
t.setShortBytes(x[42:])
|
||||
s.Add(s, t.Multiply(t, scalarTwo336))
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
|
||||
// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
|
||||
// in the 2^256 Montgomery domain.
|
||||
var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
|
||||
0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
|
||||
var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
|
||||
0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
|
||||
|
||||
// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
|
||||
// than 32 bytes.
|
||||
func (s *Scalar) setShortBytes(x []byte) *Scalar {
|
||||
if len(x) >= 32 {
|
||||
panic("edwards25519: internal error: setShortBytes called with a long string")
|
||||
}
|
||||
var buf [32]byte
|
||||
copy(buf[:], x)
|
||||
fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
|
||||
fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
|
||||
return s
|
||||
}
|
||||
|
||||
// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
|
||||
// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
|
||||
// returns nil and an error, and the receiver is unchanged.
|
||||
func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
|
||||
if len(x) != 32 {
|
||||
return nil, errors.New("invalid scalar length")
|
||||
}
|
||||
if !isReduced(x) {
|
||||
return nil, errors.New("invalid scalar encoding")
|
||||
}
|
||||
|
||||
fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
|
||||
fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// scalarMinusOneBytes is l - 1 in little endian.
|
||||
var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
|
||||
|
||||
// isReduced returns whether the given scalar in 32-byte little endian encoded
|
||||
// form is reduced modulo l.
|
||||
func isReduced(s []byte) bool {
|
||||
if len(s) != 32 {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := len(s) - 1; i >= 0; i-- {
|
||||
switch {
|
||||
case s[i] > scalarMinusOneBytes[i]:
|
||||
return false
|
||||
case s[i] < scalarMinusOneBytes[i]:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
|
||||
// Section 5.1.5 (also known as clamping) and sets s to the result. The input
|
||||
// must be 32 bytes, and it is not modified. If x is not of the right length,
|
||||
// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
|
||||
//
|
||||
// Note that since Scalar values are always reduced modulo the prime order of
|
||||
// the curve, the resulting value will not preserve any of the cofactor-clearing
|
||||
// properties that clamping is meant to provide. It will however work as
|
||||
// expected as long as it is applied to points on the prime order subgroup, like
|
||||
// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
|
||||
// irrelevant RFC 7748 clamping, but it is now required for compatibility.
|
||||
func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
|
||||
// The description above omits the purpose of the high bits of the clamping
|
||||
// for brevity, but those are also lost to reductions, and are also
|
||||
// irrelevant to edwards25519 as they protect against a specific
|
||||
// implementation bug that was once observed in a generic Montgomery ladder.
|
||||
if len(x) != 32 {
|
||||
return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
|
||||
}
|
||||
|
||||
// We need to use the wide reduction from SetUniformBytes, since clamping
|
||||
// sets the 2^254 bit, making the value higher than the order.
|
||||
var wideBytes [64]byte
|
||||
copy(wideBytes[:], x[:])
|
||||
wideBytes[0] &= 248
|
||||
wideBytes[31] &= 63
|
||||
wideBytes[31] |= 64
|
||||
return s.SetUniformBytes(wideBytes[:])
|
||||
}
|
||||
|
||||
// Bytes returns the canonical 32-byte little-endian encoding of s.
|
||||
func (s *Scalar) Bytes() []byte {
|
||||
// This function is outlined to make the allocations inline in the caller
|
||||
// rather than happen on the heap.
|
||||
var encoded [32]byte
|
||||
return s.bytes(&encoded)
|
||||
}
|
||||
|
||||
func (s *Scalar) bytes(out *[32]byte) []byte {
|
||||
var ss fiatScalarNonMontgomeryDomainFieldElement
|
||||
fiatScalarFromMontgomery(&ss, &s.s)
|
||||
fiatScalarToBytes(out, (*[4]uint64)(&ss))
|
||||
return out[:]
|
||||
}
|
||||
|
||||
// Equal returns 1 if s and t are equal, and 0 otherwise.
|
||||
func (s *Scalar) Equal(t *Scalar) int {
|
||||
var diff fiatScalarMontgomeryDomainFieldElement
|
||||
fiatScalarSub(&diff, &s.s, &t.s)
|
||||
var nonzero uint64
|
||||
fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
|
||||
nonzero |= nonzero >> 32
|
||||
nonzero |= nonzero >> 16
|
||||
nonzero |= nonzero >> 8
|
||||
nonzero |= nonzero >> 4
|
||||
nonzero |= nonzero >> 2
|
||||
nonzero |= nonzero >> 1
|
||||
return int(^nonzero) & 1
|
||||
}
|
||||
|
||||
// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
|
||||
//
|
||||
// w must be between 2 and 8, or nonAdjacentForm will panic.
|
||||
func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
|
||||
// This implementation is adapted from the one
|
||||
// in curve25519-dalek and is documented there:
|
||||
// https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
|
||||
b := s.Bytes()
|
||||
if b[31] > 127 {
|
||||
panic("scalar has high bit set illegally")
|
||||
}
|
||||
if w < 2 {
|
||||
panic("w must be at least 2 by the definition of NAF")
|
||||
} else if w > 8 {
|
||||
panic("NAF digits must fit in int8")
|
||||
}
|
||||
|
||||
var naf [256]int8
|
||||
var digits [5]uint64
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
digits[i] = binary.LittleEndian.Uint64(b[i*8:])
|
||||
}
|
||||
|
||||
width := uint64(1 << w)
|
||||
windowMask := uint64(width - 1)
|
||||
|
||||
pos := uint(0)
|
||||
carry := uint64(0)
|
||||
for pos < 256 {
|
||||
indexU64 := pos / 64
|
||||
indexBit := pos % 64
|
||||
var bitBuf uint64
|
||||
if indexBit < 64-w {
|
||||
// This window's bits are contained in a single u64
|
||||
bitBuf = digits[indexU64] >> indexBit
|
||||
} else {
|
||||
// Combine the current 64 bits with bits from the next 64
|
||||
bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
|
||||
}
|
||||
|
||||
// Add carry into the current window
|
||||
window := carry + (bitBuf & windowMask)
|
||||
|
||||
if window&1 == 0 {
|
||||
// If the window value is even, preserve the carry and continue.
|
||||
// Why is the carry preserved?
|
||||
// If carry == 0 and window & 1 == 0,
|
||||
// then the next carry should be 0
|
||||
// If carry == 1 and window & 1 == 0,
|
||||
// then bit_buf & 1 == 1 so the next carry should be 1
|
||||
pos += 1
|
||||
continue
|
||||
}
|
||||
|
||||
if window < width/2 {
|
||||
carry = 0
|
||||
naf[pos] = int8(window)
|
||||
} else {
|
||||
carry = 1
|
||||
naf[pos] = int8(window) - int8(width)
|
||||
}
|
||||
|
||||
pos += w
|
||||
}
|
||||
return naf
|
||||
}
|
||||
|
||||
func (s *Scalar) signedRadix16() [64]int8 {
|
||||
b := s.Bytes()
|
||||
if b[31] > 127 {
|
||||
panic("scalar has high bit set illegally")
|
||||
}
|
||||
|
||||
var digits [64]int8
|
||||
|
||||
// Compute unsigned radix-16 digits:
|
||||
for i := 0; i < 32; i++ {
|
||||
digits[2*i] = int8(b[i] & 15)
|
||||
digits[2*i+1] = int8((b[i] >> 4) & 15)
|
||||
}
|
||||
|
||||
// Recenter coefficients:
|
||||
for i := 0; i < 63; i++ {
|
||||
carry := (digits[i] + 8) >> 4
|
||||
digits[i] -= carry << 4
|
||||
digits[i+1] += carry
|
||||
}
|
||||
|
||||
return digits
|
||||
}
|
||||
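A brief sketch of how the Scalar API above is typically combined, assuming the vendored import path filippo.io/edwards25519: SetUniformBytes performs the wide reduction, MultiplyAdd matches Multiply followed by Add, and Bytes round-trips through SetCanonicalBytes.

package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// Derive an unbiased scalar from 64 uniformly random bytes (the wide
	// reduction implemented by SetUniformBytes).
	var seed [64]byte
	if _, err := rand.Read(seed[:]); err != nil {
		panic(err)
	}
	x, err := edwards25519.NewScalar().SetUniformBytes(seed[:])
	if err != nil {
		panic(err)
	}

	// MultiplyAdd(x, y, z) is documented to equal Multiply(x, y) then Add(_, z);
	// with z = 0 the two sides must match.
	y := edwards25519.NewScalar().Set(x)
	z := edwards25519.NewScalar() // zero value is a valid zero scalar
	a := edwards25519.NewScalar().MultiplyAdd(x, y, z)
	b := edwards25519.NewScalar().Multiply(x, y)
	fmt.Println(a.Equal(b) == 1) // true

	// Bytes returns the canonical 32-byte encoding, which SetCanonicalBytes accepts.
	if _, err := edwards25519.NewScalar().SetCanonicalBytes(a.Bytes()); err != nil {
		panic(err)
	}
	fmt.Println("round-trip ok")
}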
1147 vendor/filippo.io/edwards25519/scalar_fiat.go (generated, vendored, new file)
File diff suppressed because it is too large
214 vendor/filippo.io/edwards25519/scalarmult.go (generated, vendored, new file)
@@ -0,0 +1,214 @@
|
||||
// Copyright (c) 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package edwards25519
|
||||
|
||||
import "sync"
|
||||
|
||||
// basepointTable is a set of 32 affineLookupTables, where table i is generated
|
||||
// from 256i * basepoint. It is precomputed the first time it's used.
|
||||
func basepointTable() *[32]affineLookupTable {
|
||||
basepointTablePrecomp.initOnce.Do(func() {
|
||||
p := NewGeneratorPoint()
|
||||
for i := 0; i < 32; i++ {
|
||||
basepointTablePrecomp.table[i].FromP3(p)
|
||||
for j := 0; j < 8; j++ {
|
||||
p.Add(p, p)
|
||||
}
|
||||
}
|
||||
})
|
||||
return &basepointTablePrecomp.table
|
||||
}
|
||||
|
||||
var basepointTablePrecomp struct {
|
||||
table [32]affineLookupTable
|
||||
initOnce sync.Once
|
||||
}
|
||||
|
||||
// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
|
||||
// returns v.
|
||||
//
|
||||
// The scalar multiplication is done in constant time.
|
||||
func (v *Point) ScalarBaseMult(x *Scalar) *Point {
|
||||
basepointTable := basepointTable()
|
||||
|
||||
// Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i )
|
||||
// as described in the Ed25519 paper
|
||||
//
|
||||
// Group even and odd coefficients
|
||||
// x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
|
||||
// + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
|
||||
// x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
|
||||
// + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
|
||||
//
|
||||
// We use a lookup table for each i to get x_i*16^(2*i)*B
|
||||
// and do four doublings to multiply by 16.
|
||||
digits := x.signedRadix16()
|
||||
|
||||
multiple := &affineCached{}
|
||||
tmp1 := &projP1xP1{}
|
||||
tmp2 := &projP2{}
|
||||
|
||||
// Accumulate the odd components first
|
||||
v.Set(NewIdentityPoint())
|
||||
for i := 1; i < 64; i += 2 {
|
||||
basepointTable[i/2].SelectInto(multiple, digits[i])
|
||||
tmp1.AddAffine(v, multiple)
|
||||
v.fromP1xP1(tmp1)
|
||||
}
|
||||
|
||||
// Multiply by 16
|
||||
tmp2.FromP3(v) // tmp2 = v in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords
|
||||
tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords
|
||||
tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords
|
||||
tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords
|
||||
tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords
|
||||
v.fromP1xP1(tmp1) // now v = 16*(odd components)
|
||||
|
||||
// Accumulate the even components
|
||||
for i := 0; i < 64; i += 2 {
|
||||
basepointTable[i/2].SelectInto(multiple, digits[i])
|
||||
tmp1.AddAffine(v, multiple)
|
||||
v.fromP1xP1(tmp1)
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// ScalarMult sets v = x * q, and returns v.
|
||||
//
|
||||
// The scalar multiplication is done in constant time.
|
||||
func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
|
||||
checkInitialized(q)
|
||||
|
||||
var table projLookupTable
|
||||
table.FromP3(q)
|
||||
|
||||
// Write x = sum(x_i * 16^i)
|
||||
// so x*Q = sum( Q*x_i*16^i )
|
||||
// = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
|
||||
// <------compute inside out---------
|
||||
//
|
||||
// We use the lookup table to get the x_i*Q values
|
||||
// and do four doublings to compute 16*Q
|
||||
digits := x.signedRadix16()
|
||||
|
||||
	// Unwrap first loop iteration to save computing 16*identity
	multiple := &projCached{}
	tmp1 := &projP1xP1{}
	tmp2 := &projP2{}
	table.SelectInto(multiple, digits[63])

	v.Set(NewIdentityPoint())
	tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
	for i := 62; i >= 0; i-- {
		tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords
		tmp1.Double(tmp2)    // tmp1 = 2*(prev) in P1xP1 coords
		tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
		tmp1.Double(tmp2)    // tmp1 = 4*(prev) in P1xP1 coords
		tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
		tmp1.Double(tmp2)    // tmp1 = 8*(prev) in P1xP1 coords
		tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
		tmp1.Double(tmp2)    // tmp1 = 16*(prev) in P1xP1 coords
		v.fromP1xP1(tmp1)    // v = 16*(prev) in P3 coords
		table.SelectInto(multiple, digits[i])
		tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
	}
	v.fromP1xP1(tmp1)
	return v
}

// basepointNafTable is the nafLookupTable8 for the basepoint.
// It is precomputed the first time it's used.
func basepointNafTable() *nafLookupTable8 {
	basepointNafTablePrecomp.initOnce.Do(func() {
		basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
	})
	return &basepointNafTablePrecomp.table
}

var basepointNafTablePrecomp struct {
	table    nafLookupTable8
	initOnce sync.Once
}

// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
// generator, and returns v.
//
// Execution time depends on the inputs.
func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
	checkInitialized(A)

	// Similarly to the single variable-base approach, we compute
	// digits and use them with a lookup table. However, because
	// we are allowed to do variable-time operations, we don't
	// need constant-time lookups or constant-time digit
	// computations.
	//
	// So we use a non-adjacent form of some width w instead of
	// radix 16. This is like a binary representation (one digit
	// for each binary place) but we allow the digits to grow in
	// magnitude up to 2^{w-1} so that the nonzero digits are as
	// sparse as possible. Intuitively, this "condenses" the
	// "mass" of the scalar onto sparse coefficients (meaning
	// fewer additions).

	basepointNafTable := basepointNafTable()
	var aTable nafLookupTable5
	aTable.FromP3(A)
	// Because the basepoint is fixed, we can use a wider NAF
	// corresponding to a bigger table.
	aNaf := a.nonAdjacentForm(5)
	bNaf := b.nonAdjacentForm(8)

	// Find the first nonzero coefficient.
	i := 255
	for j := i; j >= 0; j-- {
		if aNaf[j] != 0 || bNaf[j] != 0 {
			break
		}
	}

	multA := &projCached{}
	multB := &affineCached{}
	tmp1 := &projP1xP1{}
	tmp2 := &projP2{}
	tmp2.Zero()

	// Move from high to low bits, doubling the accumulator
	// at each iteration and checking whether there is a nonzero
	// coefficient to look up a multiple of.
	for ; i >= 0; i-- {
		tmp1.Double(tmp2)

		// Only update v if we have a nonzero coeff to add in.
		if aNaf[i] > 0 {
			v.fromP1xP1(tmp1)
			aTable.SelectInto(multA, aNaf[i])
			tmp1.Add(v, multA)
		} else if aNaf[i] < 0 {
			v.fromP1xP1(tmp1)
			aTable.SelectInto(multA, -aNaf[i])
			tmp1.Sub(v, multA)
		}

		if bNaf[i] > 0 {
			v.fromP1xP1(tmp1)
			basepointNafTable.SelectInto(multB, bNaf[i])
			tmp1.AddAffine(v, multB)
		} else if bNaf[i] < 0 {
			v.fromP1xP1(tmp1)
			basepointNafTable.SelectInto(multB, -bNaf[i])
			tmp1.SubAffine(v, multB)
		}

		tmp2.FromP1xP1(tmp1)
	}

	v.fromP2(tmp2)
	return v
}
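The comments in VarTimeDoubleScalarBaseMult above describe recoding a scalar into a width-w non-adjacent form so that nonzero digits are as sparse as possible. A minimal standalone sketch of that recoding on a small non-negative integer follows; it is illustrative only and is not the vendored library's implementation, which performs the same recoding on 256-bit scalars.

```go
package main

import "fmt"

// nonAdjacentForm recodes a non-negative n into digits d[0], d[1], ... with
// n = sum d[i]*2^i, every d[i] zero or odd with |d[i]| < 2^(w-1), and at most
// one nonzero digit in any w consecutive positions.
func nonAdjacentForm(n int64, w uint) []int64 {
	var digits []int64
	for n > 0 {
		var d int64
		if n&1 == 1 {
			d = n & ((1 << w) - 1) // n mod 2^w
			if d >= 1<<(w-1) {
				d -= 1 << w // pick the negative representative
			}
			n -= d // n is now divisible by 2^w, so the next w-1 digits are zero
		}
		digits = append(digits, d)
		n >>= 1
	}
	return digits
}

func main() {
	// 55 = -9*2^0 + 1*2^6: only two nonzero digits, at least five positions apart.
	fmt.Println(nonAdjacentForm(55, 5)) // [-9 0 0 0 0 0 1]
}
```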
129
vendor/filippo.io/edwards25519/tables.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
// Copyright (c) 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package edwards25519

import (
	"crypto/subtle"
)

// A dynamic lookup table for variable-base, constant-time scalar muls.
type projLookupTable struct {
	points [8]projCached
}

// A precomputed lookup table for fixed-base, constant-time scalar muls.
type affineLookupTable struct {
	points [8]affineCached
}

// A dynamic lookup table for variable-base, variable-time scalar muls.
type nafLookupTable5 struct {
	points [8]projCached
}

// A precomputed lookup table for fixed-base, variable-time scalar muls.
type nafLookupTable8 struct {
	points [64]affineCached
}

// Constructors.

// Builds a lookup table at runtime. Fast.
func (v *projLookupTable) FromP3(q *Point) {
	// Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
	// This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
	v.points[0].FromP3(q)
	tmpP3 := Point{}
	tmpP1xP1 := projP1xP1{}
	for i := 0; i < 7; i++ {
		// Compute (i+1)*Q as Q + i*Q and convert to a projCached
		// This is needlessly complicated because the API has explicit
		// receivers instead of creating stack objects and relying on RVO
		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
	}
}

// This is not optimised for speed; fixed-base tables should be precomputed.
func (v *affineLookupTable) FromP3(q *Point) {
	// Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
	// This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
	v.points[0].FromP3(q)
	tmpP3 := Point{}
	tmpP1xP1 := projP1xP1{}
	for i := 0; i < 7; i++ {
		// Compute (i+1)*Q as Q + i*Q and convert to affineCached
		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
	}
}

// Builds a lookup table at runtime. Fast.
func (v *nafLookupTable5) FromP3(q *Point) {
	// Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
	// This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
	v.points[0].FromP3(q)
	q2 := Point{}
	q2.Add(q, q)
	tmpP3 := Point{}
	tmpP1xP1 := projP1xP1{}
	for i := 0; i < 7; i++ {
		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
	}
}

// This is not optimised for speed; fixed-base tables should be precomputed.
func (v *nafLookupTable8) FromP3(q *Point) {
	v.points[0].FromP3(q)
	q2 := Point{}
	q2.Add(q, q)
	tmpP3 := Point{}
	tmpP1xP1 := projP1xP1{}
	for i := 0; i < 63; i++ {
		v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
	}
}

// Selectors.

// Set dest to x*Q, where -8 <= x <= 8, in constant time.
func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
	// Compute xabs = |x|
	xmask := x >> 7
	xabs := uint8((x + xmask) ^ xmask)

	dest.Zero()
	for j := 1; j <= 8; j++ {
		// Set dest = j*Q if |x| = j
		cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
		dest.Select(&v.points[j-1], dest, cond)
	}
	// Now dest = |x|*Q, conditionally negate to get x*Q
	dest.CondNeg(int(xmask & 1))
}

// Set dest to x*Q, where -8 <= x <= 8, in constant time.
func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
	// Compute xabs = |x|
	xmask := x >> 7
	xabs := uint8((x + xmask) ^ xmask)

	dest.Zero()
	for j := 1; j <= 8; j++ {
		// Set dest = j*Q if |x| = j
		cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
		dest.Select(&v.points[j-1], dest, cond)
	}
	// Now dest = |x|*Q, conditionally negate to get x*Q
	dest.CondNeg(int(xmask & 1))
}

// Given odd x with 0 < x < 2^4, return x*Q (in variable time).
func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
	*dest = v.points[x/2]
}

// Given odd x with 0 < x < 2^7, return x*Q (in variable time).
func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
	*dest = v.points[x/2]
}
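The SelectInto methods above read every table entry and decide with crypto/subtle primitives, so neither the memory access pattern nor any branch depends on the secret digit. A standalone sketch of the same pattern on plain integers (illustrative only; the table of int64 values stands in for the library's cached curve points and is not its API):

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

// lookup returns sign(x) * table[|x|-1] for -8 <= x <= 8 (0 for x == 0),
// scanning the whole table so the access pattern is independent of x.
func lookup(table [8]int64, x int8) int64 {
	xmask := x >> 7                    // 0 if x >= 0, -1 if x < 0
	xabs := uint8((x + xmask) ^ xmask) // |x| without branching

	var r int64
	for j := 1; j <= 8; j++ {
		cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) // 1 iff |x| == j
		m := -int64(cond)                                 // all-ones mask iff selected
		r = (r &^ m) | (table[j-1] & m)                   // constant-time assignment
	}
	// Conditionally negate without branching: (r ^ s) - s == -r when s == -1.
	s := int64(xmask)
	return (r ^ s) - s
}

func main() {
	table := [8]int64{10, 20, 30, 40, 50, 60, 70, 80} // stands in for Q, 2Q, ..., 8Q
	fmt.Println(lookup(table, 3), lookup(table, -5), lookup(table, 0)) // 30 -50 0
}
```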
14
vendor/github.com/fsnotify/fsnotify/.cirrus.yml
generated
vendored
Normal file
@@ -0,0 +1,14 @@
freebsd_task:
  name: 'FreeBSD'
  freebsd_instance:
    image_family: freebsd-14-1
  install_script:
    - pkg update -f
    - pkg install -y go
  test_script:
    # run tests as user "cirrus" instead of root
    - pw useradd cirrus -m
    - chown -R cirrus:cirrus .
    - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
    - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
    - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...
10
vendor/github.com/fsnotify/fsnotify/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,10 @@
# go test -c output
*.test
*.test.exe

# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe

/test/kqueue
/test/a.out
2
vendor/github.com/fsnotify/fsnotify/.mailmap
generated
vendored
Normal file
@@ -0,0 +1,2 @@
Chris Howey <howeyc@gmail.com> <chris@howey.me>
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>
569
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,569 @@
|
||||
# Changelog
|
||||
|
||||
1.8.0 2023-10-31
|
||||
----------------
|
||||
|
||||
### Additions
|
||||
|
||||
- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
|
||||
|
||||
- kqueue: ignore events with Ident=0 ([#590])
|
||||
|
||||
- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
|
||||
|
||||
- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
|
||||
|
||||
- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
|
||||
|
||||
- inotify: fix panic when calling Remove() in a goroutine ([#650])
|
||||
|
||||
- fen: allow watching subdirectories of watched directories ([#621])
|
||||
|
||||
[#590]: https://github.com/fsnotify/fsnotify/pull/590
|
||||
[#610]: https://github.com/fsnotify/fsnotify/pull/610
|
||||
[#617]: https://github.com/fsnotify/fsnotify/pull/617
|
||||
[#619]: https://github.com/fsnotify/fsnotify/pull/619
|
||||
[#620]: https://github.com/fsnotify/fsnotify/pull/620
|
||||
[#621]: https://github.com/fsnotify/fsnotify/pull/621
|
||||
[#625]: https://github.com/fsnotify/fsnotify/pull/625
|
||||
[#650]: https://github.com/fsnotify/fsnotify/pull/650
|
||||
|
||||
1.7.0 - 2023-10-22
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.17.
|
||||
|
||||
### Additions
|
||||
|
||||
- illumos: add FEN backend to support illumos and Solaris. ([#371])
|
||||
|
||||
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
|
||||
in cases where you can't control the kernel buffer and receive a large number
|
||||
of events in bursts. ([#550], [#572])
|
||||
|
||||
- all: add `AddWith()`, which is identical to `Add()` but allows passing
|
||||
options. ([#521])
|
||||
|
||||
- windows: allow setting the ReadDirectoryChangesW() buffer size with
|
||||
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
|
||||
works on all platforms and is enough for most purposes, but in some cases a
|
||||
highest buffer is needed. ([#521])
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- inotify: remove watcher if a watched path is renamed ([#518])
|
||||
|
||||
After a rename the reported name wasn't updated, or even an empty string.
|
||||
Inotify doesn't provide any good facilities to update it, so just remove the
|
||||
watcher. This is already how it worked on kqueue and FEN.
|
||||
|
||||
On Windows this does work, and remains working.
|
||||
|
||||
- windows: don't listen for file attribute changes ([#520])
|
||||
|
||||
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
|
||||
with no way to see if they're a file write or attribute change, so would show
|
||||
up as a fsnotify.Write event. This is never useful, and could result in many
|
||||
spurious Write events.
|
||||
|
||||
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
|
||||
|
||||
Before it would merely return "short read", making it hard to detect this
|
||||
error.
|
||||
|
||||
- kqueue: make sure events for all files are delivered properly when removing a
|
||||
watched directory ([#526])
|
||||
|
||||
Previously they would get sent with `""` (empty string) or `"."` as the path
|
||||
name.
|
||||
|
||||
- kqueue: don't emit spurious Create events for symbolic links ([#524])
|
||||
|
||||
The link would get resolved but kqueue would "forget" it already saw the link
|
||||
itself, resulting on a Create for every Write event for the directory.
|
||||
|
||||
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
|
||||
|
||||
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
|
||||
`backend_other.go`, making it easier to use on unsupported platforms such as
|
||||
WASM, AIX, etc. ([#528])
|
||||
|
||||
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
|
||||
Google AppEngine forbids usage of the unsafe package so the inotify backend
|
||||
won't compile there.
|
||||
|
||||
[#371]: https://github.com/fsnotify/fsnotify/pull/371
|
||||
[#516]: https://github.com/fsnotify/fsnotify/pull/516
|
||||
[#518]: https://github.com/fsnotify/fsnotify/pull/518
|
||||
[#520]: https://github.com/fsnotify/fsnotify/pull/520
|
||||
[#521]: https://github.com/fsnotify/fsnotify/pull/521
|
||||
[#524]: https://github.com/fsnotify/fsnotify/pull/524
|
||||
[#525]: https://github.com/fsnotify/fsnotify/pull/525
|
||||
[#526]: https://github.com/fsnotify/fsnotify/pull/526
|
||||
[#528]: https://github.com/fsnotify/fsnotify/pull/528
|
||||
[#537]: https://github.com/fsnotify/fsnotify/pull/537
|
||||
[#550]: https://github.com/fsnotify/fsnotify/pull/550
|
||||
[#572]: https://github.com/fsnotify/fsnotify/pull/572
|
||||
|
||||
1.6.0 - 2022-10-13
|
||||
------------------
|
||||
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
|
||||
but not documented). It also increases the minimum Linux version to 2.6.32.
|
||||
|
||||
### Additions
|
||||
|
||||
- all: add `Event.Has()` and `Op.Has()` ([#477])
|
||||
|
||||
This makes checking events a lot easier; for example:
|
||||
|
||||
if event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
}
|
||||
|
||||
Becomes:
|
||||
|
||||
if event.Has(Write) && !event.Has(Remove) {
|
||||
}
|
||||
|
||||
- all: add cmd/fsnotify ([#463])
|
||||
|
||||
A command-line utility for testing and some examples.
|
||||
|
||||
### Changes and fixes
|
||||
|
||||
- inotify: don't ignore events for files that don't exist ([#260], [#470])
|
||||
|
||||
Previously the inotify watcher would call `os.Lstat()` to check if a file
|
||||
still exists before emitting events.
|
||||
|
||||
This was inconsistent with other platforms and resulted in inconsistent event
|
||||
reporting (e.g. when a file is quickly removed and re-created), and generally
|
||||
a source of confusion. It was added in 2013 to fix a memory leak that no
|
||||
longer exists.
|
||||
|
||||
- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
|
||||
not watched ([#460])
|
||||
|
||||
- inotify: replace epoll() with non-blocking inotify ([#434])
|
||||
|
||||
Non-blocking inotify was not generally available at the time this library was
|
||||
written in 2014, but now it is. As a result, the minimum Linux version is
|
||||
bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
|
||||
|
||||
- kqueue: don't check for events every 100ms ([#480])
|
||||
|
||||
The watcher would wake up every 100ms, even when there was nothing to do. Now
|
||||
it waits until there is something to do.
|
||||
|
||||
- macos: retry opening files on EINTR ([#475])
|
||||
|
||||
- kqueue: skip unreadable files ([#479])
|
||||
|
||||
kqueue requires a file descriptor for every file in a directory; this would
|
||||
fail if a file was unreadable by the current user. Now these files are simply
|
||||
skipped.
|
||||
|
||||
- windows: fix renaming a watched directory if the parent is also watched ([#370])
|
||||
|
||||
- windows: increase buffer size from 4K to 64K ([#485])
|
||||
|
||||
- windows: close file handle on Remove() ([#288])
|
||||
|
||||
- kqueue: put pathname in the error if watching a file fails ([#471])
|
||||
|
||||
- inotify, windows: calling Close() more than once could race ([#465])
|
||||
|
||||
- kqueue: improve Close() performance ([#233])
|
||||
|
||||
- all: various documentation additions and clarifications.
|
||||
|
||||
[#233]: https://github.com/fsnotify/fsnotify/pull/233
|
||||
[#260]: https://github.com/fsnotify/fsnotify/pull/260
|
||||
[#288]: https://github.com/fsnotify/fsnotify/pull/288
|
||||
[#370]: https://github.com/fsnotify/fsnotify/pull/370
|
||||
[#434]: https://github.com/fsnotify/fsnotify/pull/434
|
||||
[#460]: https://github.com/fsnotify/fsnotify/pull/460
|
||||
[#463]: https://github.com/fsnotify/fsnotify/pull/463
|
||||
[#465]: https://github.com/fsnotify/fsnotify/pull/465
|
||||
[#470]: https://github.com/fsnotify/fsnotify/pull/470
|
||||
[#471]: https://github.com/fsnotify/fsnotify/pull/471
|
||||
[#475]: https://github.com/fsnotify/fsnotify/pull/475
|
||||
[#477]: https://github.com/fsnotify/fsnotify/pull/477
|
||||
[#479]: https://github.com/fsnotify/fsnotify/pull/479
|
||||
[#480]: https://github.com/fsnotify/fsnotify/pull/480
|
||||
[#485]: https://github.com/fsnotify/fsnotify/pull/485
|
||||
|
||||
## [1.5.4] - 2022-04-25
|
||||
|
||||
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
|
||||
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
|
||||
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
|
||||
|
||||
## [1.5.3] - 2022-04-22
|
||||
|
||||
* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
|
||||
|
||||
## [1.5.2] - 2022-04-21
|
||||
|
||||
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
|
||||
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
|
||||
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
|
||||
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
|
||||
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
|
||||
|
||||
## [1.5.1] - 2021-08-24
|
||||
|
||||
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
|
||||
|
||||
## [1.5.0] - 2021-08-20
|
||||
|
||||
* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
|
||||
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
|
||||
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
|
||||
[#378](https://github.com/fsnotify/fsnotify/pull/378)
|
||||
[#381](https://github.com/fsnotify/fsnotify/pull/381)
|
||||
[#385](https://github.com/fsnotify/fsnotify/pull/385)
|
||||
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
|
||||
|
||||
## [1.4.9] - 2020-03-11
|
||||
|
||||
* Move example usage to the readme #329. This may resolve #328.
|
||||
|
||||
## [1.4.8] - 2020-03-10
|
||||
|
||||
* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
|
||||
* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
|
||||
* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
|
||||
* CI: Less verbosity (@nathany #267)
|
||||
* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
|
||||
* Tests: Check if channels are closed in the example (@alexeykazakov #244)
|
||||
* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
|
||||
* CI: Add windows to travis matrix (@cpuguy83 #284)
|
||||
* Docs: Remover appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
|
||||
* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
|
||||
* Linux: open files with close-on-exec (@linxiulei #273)
|
||||
* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
|
||||
* Project: Add go.mod (@nathany #309)
|
||||
* Project: Revise editor config (@nathany #309)
|
||||
* Project: Update copyright for 2019 (@nathany #309)
|
||||
* CI: Drop go1.8 from CI matrix (@nathany #309)
|
||||
* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
|
||||
|
||||
## [1.4.7] - 2018-01-09
|
||||
|
||||
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
|
||||
* Tests: Fix missing verb on format string (thanks @rchiossi)
|
||||
* Linux: Fix deadlock in Remove (thanks @aarondl)
|
||||
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
|
||||
* Docs: Moved FAQ into the README (thanks @vahe)
|
||||
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
|
||||
* Docs: replace references to OS X with macOS
|
||||
|
||||
## [1.4.2] - 2016-10-10
|
||||
|
||||
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||
|
||||
## [1.4.1] - 2016-10-04
|
||||
|
||||
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||
|
||||
## [1.4.0] - 2016-10-01
|
||||
|
||||
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||
|
||||
## [1.3.1] - 2016-06-28
|
||||
|
||||
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||
|
||||
## [1.3.0] - 2016-04-19
|
||||
|
||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||
|
||||
## [1.2.10] - 2016-03-02
|
||||
|
||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||
|
||||
## [1.2.9] - 2016-01-13
|
||||
|
||||
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||
|
||||
## [1.2.8] - 2015-12-17
|
||||
|
||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||
* inotify: fix race in test
|
||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||
|
||||
## [1.2.5] - 2015-10-17
|
||||
|
||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||
|
||||
## [1.2.1] - 2015-10-14
|
||||
|
||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||
|
||||
## [1.2.0] - 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||
|
||||
## [1.1.1] - 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## [1.1.0] - 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## [1.0.4] - 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## [1.0.3] - 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||
|
||||
## [1.0.2] - 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## [1.0.0] - 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## [0.9.3] - 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## [0.9.2] - 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## [0.9.1] - 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## [0.9.0] - 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## [0.8.12] - 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## [0.8.11] - 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
||||
|
||||
## [0.8.10] - 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## [0.8.9] - 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## [0.8.8] - 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## [0.8.7] - 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## [0.8.6] - 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## [0.8.5] - 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## [0.8.4] - 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## [0.8.3] - 2013-03-13
|
||||
|
||||
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## [0.8.2] - 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## [0.8.1] - 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## [0.8.0] - 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## [0.7.4] - 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## [0.7.3] - 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## [0.7.2] - 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## [0.7.1] - 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## [0.7.0] - 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## [0.6.0] - 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## [0.5.1] - 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## [0.5.0] - 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## [0.4.0] - 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## [0.3.0] - 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## [0.2.0] - 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## [0.1.0] - 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
||||
144
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
Thank you for your interest in contributing to fsnotify! We try to review and
|
||||
merge PRs in a reasonable timeframe, but please be aware that:
|
||||
|
||||
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
|
||||
can just send PRs, but they may end up being rejected for one reason or the
|
||||
other.
|
||||
|
||||
- fsnotify is a cross-platform library, and changes must work reasonably well on
|
||||
all supported platforms.
|
||||
|
||||
- Changes will need to be compatible; old code should still compile, and the
|
||||
runtime behaviour can't change in ways that are likely to lead to problems for
|
||||
users.
|
||||
|
||||
Testing
|
||||
-------
|
||||
Just `go test ./...` runs all the tests; the CI runs this on all supported
|
||||
platforms. Testing different platforms locally can be done with something like
|
||||
[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
|
||||
|
||||
Use the `-short` flag to make the "stress test" run faster.
|
||||
|
||||
Writing new tests
|
||||
-----------------
|
||||
Scripts in the testdata directory allow creating test cases in a "shell-like"
|
||||
syntax. The basic format is:
|
||||
|
||||
script
|
||||
|
||||
Output:
|
||||
desired output
|
||||
|
||||
For example:
|
||||
|
||||
# Create a new empty file with some data.
|
||||
watch /
|
||||
echo data >/file
|
||||
|
||||
Output:
|
||||
create /file
|
||||
write /file
|
||||
|
||||
Just create a new file to add a new test; select which tests to run with
|
||||
`-run TestScript/[path]`.
|
||||
|
||||
script
|
||||
------
|
||||
The script is a "shell-like" script:
|
||||
|
||||
cmd arg arg
|
||||
|
||||
Comments are supported with `#`:
|
||||
|
||||
# Comment
|
||||
cmd arg arg # Comment
|
||||
|
||||
All operations are done in a temp directory; a path like "/foo" is rewritten to
|
||||
"/tmp/TestFoo/foo".
|
||||
|
||||
Arguments can be quoted with `"` or `'`; there are no escapes and they're
|
||||
functionally identical right now, but this may change in the future, so best to
|
||||
assume shell-like rules.
|
||||
|
||||
touch "/file with spaces"
|
||||
|
||||
End-of-line escapes with `\` are not supported.
|
||||
|
||||
### Supported commands
|
||||
|
||||
watch path [ops] # Watch the path, reporting events for it. Nothing is
|
||||
# watched by default. Optionally a list of ops can be
|
||||
# given, as with AddWith(path, WithOps(...)).
|
||||
unwatch path # Stop watching the path.
|
||||
watchlist n # Assert watchlist length.
|
||||
|
||||
stop # Stop running the script; for debugging.
|
||||
debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
|
||||
parallel by default, so -parallel=1 is probably a good
|
||||
idea).
|
||||
|
||||
touch path
|
||||
mkdir [-p] dir
|
||||
ln -s target link # Only ln -s supported.
|
||||
mkfifo path
|
||||
mknod dev path
|
||||
mv src dst
|
||||
rm [-r] path
|
||||
chmod mode path # Octal only
|
||||
sleep time-in-ms
|
||||
|
||||
cat path # Read path (does nothing with the data; just reads it).
|
||||
echo str >>path # Append "str" to "path".
|
||||
echo str >path # Truncate "path" and write "str".
|
||||
|
||||
require reason # Skip the test if "reason" is true; "skip" and
|
||||
skip reason # "require" behave identical; it supports both for
|
||||
# readability. Possible reasons are:
|
||||
#
|
||||
# always Always skip this test.
|
||||
# symlink Symlinks are supported (requires admin
|
||||
# permissions on Windows).
|
||||
# mkfifo Platform doesn't support FIFO named sockets.
|
||||
# mknod Platform doesn't support device nodes.
|
||||
|
||||
|
||||
output
|
||||
------
|
||||
After `Output:` the desired output is given; this is indented by convention, but
|
||||
that's not required.
|
||||
|
||||
The format of that is:
|
||||
|
||||
# Comment
|
||||
event path # Comment
|
||||
|
||||
system:
|
||||
event path
|
||||
system2:
|
||||
event path
|
||||
|
||||
Every event is one line, and any whitespace between the event and path are
|
||||
ignored. The path can optionally be surrounded in ". Anything after a "#" is
|
||||
ignored.
|
||||
|
||||
Platform-specific tests can be added after GOOS; for example:
|
||||
|
||||
watch /
|
||||
touch /file
|
||||
|
||||
Output:
|
||||
# Tested if nothing else matches
|
||||
create /file
|
||||
|
||||
# Windows-specific test.
|
||||
windows:
|
||||
write /file
|
||||
|
||||
You can specify multiple platforms with a comma (e.g. "windows, linux:").
|
||||
"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
|
||||
|
||||
|
||||
[goon]: https://github.com/arp242/goon
|
||||
[Vagrant]: https://www.vagrantup.com/
|
||||
[integration_test.go]: /integration_test.go
|
||||
25
vendor/github.com/fsnotify/fsnotify/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,25 @@
Copyright © 2012 The Go Authors. All rights reserved.
Copyright © fsnotify Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

 * Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice, this
   list of conditions and the following disclaimer in the documentation and/or
   other materials provided with the distribution.
 * Neither the name of Google Inc. nor the names of its contributors may be used
   to endorse or promote products derived from this software without specific
   prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
184
vendor/github.com/fsnotify/fsnotify/README.md
generated
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
fsnotify is a Go library to provide cross-platform filesystem notifications on
|
||||
Windows, Linux, macOS, BSD, and illumos.
|
||||
|
||||
Go 1.17 or newer is required; the full documentation is at
|
||||
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
||||
|
||||
---
|
||||
|
||||
Platform support:
|
||||
|
||||
| Backend | OS | Status |
|
||||
| :-------------------- | :--------- | :------------------------------------------------------------------------ |
|
||||
| inotify | Linux | Supported |
|
||||
| kqueue | BSD, macOS | Supported |
|
||||
| ReadDirectoryChangesW | Windows | Supported |
|
||||
| FEN | illumos | Supported |
|
||||
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
|
||||
| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment |
|
||||
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
|
||||
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
|
||||
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
|
||||
|
||||
Linux and illumos should include Android and Solaris, but these are currently
|
||||
untested.
|
||||
|
||||
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
|
||||
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
|
||||
[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129
|
||||
|
||||
Usage
|
||||
-----
|
||||
A basic example:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Create new watcher.
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
|
||||
// Start listening for events.
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-watcher.Events:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
log.Println("event:", event)
|
||||
if event.Has(fsnotify.Write) {
|
||||
log.Println("modified file:", event.Name)
|
||||
}
|
||||
case err, ok := <-watcher.Errors:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
log.Println("error:", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Add a path.
|
||||
err = watcher.Add("/tmp")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Block main goroutine forever.
|
||||
<-make(chan struct{})
|
||||
}
|
||||
```
|
||||
|
||||
Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
|
||||
run with:
|
||||
|
||||
% go run ./cmd/fsnotify
|
||||
|
||||
Further detailed documentation can be found in godoc:
|
||||
https://pkg.go.dev/github.com/fsnotify/fsnotify
|
||||
|
||||
FAQ
|
||||
---
|
||||
### Will a file still be watched when it's moved to another directory?
|
||||
No, not unless you are watching the location it was moved to.
|
||||
|
||||
### Are subdirectories watched?
|
||||
No, you must add watches for any directory you want to watch (a recursive
|
||||
watcher is on the roadmap: [#18]).
|
||||
|
||||
[#18]: https://github.com/fsnotify/fsnotify/issues/18
|
||||
|
||||
### Do I have to watch the Error and Event channels in a goroutine?
|
||||
Yes. You can read both channels in the same goroutine using `select` (you don't
|
||||
need a separate goroutine for both channels; see the example).
|
||||
|
||||
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
|
||||
fsnotify requires support from underlying OS to work. The current NFS and SMB
|
||||
protocols does not provide network level support for file notifications, and
|
||||
neither do the /proc and /sys virtual filesystems.
|
||||
|
||||
This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
|
||||
|
||||
[#9]: https://github.com/fsnotify/fsnotify/issues/9
|
||||
|
||||
### Why do I get many Chmod events?
|
||||
Some programs may generate a lot of attribute changes; for example Spotlight on
|
||||
macOS, anti-virus programs, backup applications, and some others are known to do
|
||||
this. As a rule, it's typically best to ignore Chmod events. They're often not
|
||||
useful, and tend to cause problems.
|
||||
|
||||
Spotlight indexing on macOS can result in multiple events (see [#15]). A
|
||||
temporary workaround is to add your folder(s) to the *Spotlight Privacy
|
||||
settings* until we have a native FSEvents implementation (see [#11]).
|
||||
|
||||
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
[#15]: https://github.com/fsnotify/fsnotify/issues/15
|
||||
|
||||
### Watching a file doesn't work well
|
||||
Watching individual files (rather than directories) is generally not recommended
|
||||
as many programs (especially editors) update files atomically: it will write to
|
||||
a temporary file which is then moved to to destination, overwriting the original
|
||||
(or some variant thereof). The watcher on the original file is now lost, as that
|
||||
no longer exists.
|
||||
|
||||
The upshot of this is that a power failure or crash won't leave a half-written
|
||||
file.
|
||||
|
||||
Watch the parent directory and use `Event.Name` to filter out files you're not
|
||||
interested in. There is an example of this in `cmd/fsnotify/file.go`.
|
||||
|
||||
Platform-specific notes
|
||||
-----------------------
|
||||
### Linux
|
||||
When a file is removed a REMOVE event won't be emitted until all file
|
||||
descriptors are closed; it will emit a CHMOD instead:
|
||||
|
||||
fp := os.Open("file")
|
||||
os.Remove("file") // CHMOD
|
||||
fp.Close() // REMOVE
|
||||
|
||||
This is the event that inotify sends, so not much can be changed about this.
|
||||
|
||||
The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
|
||||
the number of watches per user, and `fs.inotify.max_user_instances` specifies
|
||||
the maximum number of inotify instances per user. Every Watcher you create is an
|
||||
"instance", and every path you add is a "watch".
|
||||
|
||||
These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
|
||||
`/proc/sys/fs/inotify/max_user_instances`
|
||||
|
||||
To increase them you can use `sysctl` or write the value to proc file:
|
||||
|
||||
# The default values on Linux 5.18
|
||||
sysctl fs.inotify.max_user_watches=124983
|
||||
sysctl fs.inotify.max_user_instances=128
|
||||
|
||||
To make the changes persist on reboot edit `/etc/sysctl.conf` or
|
||||
`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
|
||||
distro's documentation):
|
||||
|
||||
fs.inotify.max_user_watches=124983
|
||||
fs.inotify.max_user_instances=128
|
||||
|
||||
Reaching the limit will result in a "no space left on device" or "too many open
|
||||
files" error.
|
||||
|
||||
### kqueue (macOS, all BSD systems)
|
||||
kqueue requires opening a file descriptor for every file that's being watched;
|
||||
so if you're watching a directory with five files then that's six file
|
||||
descriptors. You will run in to your system's "max open files" limit faster on
|
||||
these platforms.
|
||||
|
||||
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
|
||||
control the maximum number of open files.
|
||||
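The vendored README's FAQ recommends watching a file's parent directory and filtering on Event.Name rather than watching the file itself. A hedged sketch of that pattern using the fsnotify API shown above (the target path is a placeholder):

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	target := "/tmp/config.json" // hypothetical file to follow; adjust as needed

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the directory, not the file, so atomic saves don't drop the watch.
	if err := watcher.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			if filepath.Base(event.Name) != filepath.Base(target) {
				continue // event for some other entry in the directory
			}
			if event.Has(fsnotify.Write) || event.Has(fsnotify.Create) {
				log.Println("target changed:", event)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```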
484
vendor/github.com/fsnotify/fsnotify/backend_fen.go
generated
vendored
Normal file
@@ -0,0 +1,484 @@
|
||||
//go:build solaris
|
||||
|
||||
// FEN backend for illumos (supported) and Solaris (untested, but should work).
|
||||
//
|
||||
// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify/internal"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type fen struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
mu sync.Mutex
|
||||
port *unix.EventPort
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
dirs map[string]Op // Explicitly watched directories
|
||||
watches map[string]Op // Explicitly watched non-directories
|
||||
}
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
return newBufferedBackend(0, ev, errs)
|
||||
}
|
||||
|
||||
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
|
||||
w := &fen{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
dirs: make(map[string]Op),
|
||||
watches: make(map[string]Op),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
var err error
|
||||
w.port, err = unix.NewEventPort()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// sendEvent attempts to send an event to the user, returning true if the event
|
||||
// was put in the channel successfully and false if the watcher has been closed.
|
||||
func (w *fen) sendEvent(name string, op Op) (sent bool) {
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Events <- Event{Name: name, Op: op}:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// sendError attempts to send an error to the user, returning true if the error
|
||||
// was put in the channel successfully and false if the watcher has been closed.
|
||||
func (w *fen) sendError(err error) (sent bool) {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *fen) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *fen) Close() error {
|
||||
// Take the lock used by associateFile to prevent lingering events from
|
||||
// being processed after the close
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
close(w.done)
|
||||
return w.port.Close()
|
||||
}
|
||||
|
||||
func (w *fen) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *fen) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
// Currently we resolve symlinks that were explicitly requested to be
|
||||
// watched. Otherwise we would use LStat here.
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Associate all files in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, true, w.associateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.dirs[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.associateFile(name, stat, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.watches[name] = with.op
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if !w.port.PathIsWatched(name) {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
// The user has expressed an intent. Immediately remove this name from
|
||||
// whichever watch list it might be in. If it's not in there the delete
|
||||
// doesn't cause harm.
|
||||
w.mu.Lock()
|
||||
delete(w.watches, name)
|
||||
delete(w.dirs, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
stat, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove associations for every file in the directory.
|
||||
if stat.IsDir() {
|
||||
err := w.handleDirectory(name, stat, false, w.dissociateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err = w.port.DissociatePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents contains the main loop that runs in a goroutine watching for events.
|
||||
func (w *fen) readEvents() {
|
||||
// If this function returns, the watcher has been closed and we can close
|
||||
// these channels
|
||||
defer func() {
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
pevents := make([]unix.PortEvent, 8)
|
||||
for {
|
||||
count, err := w.port.Get(pevents, 1, nil)
|
||||
if err != nil && err != unix.ETIME {
|
||||
// Interrupted system call (count should be 0) ignore and continue
|
||||
if errors.Is(err, unix.EINTR) && count == 0 {
|
||||
continue
|
||||
}
|
||||
// Get failed because we called w.Close()
|
||||
if errors.Is(err, unix.EBADF) && w.isClosed() {
|
||||
return
|
||||
}
|
||||
// There was an error not caused by calling w.Close()
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
p := pevents[:count]
|
||||
for _, pevent := range p {
|
||||
if pevent.Source != unix.PORT_SOURCE_FILE {
|
||||
// Event from unexpected source received; should never happen.
|
||||
if !w.sendError(errors.New("Event from unexpected source received")) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(pevent.Path, pevent.Events)
|
||||
}
|
||||
|
||||
err = w.handleEvent(&pevent)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle all children of the directory.
|
||||
for _, entry := range files {
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// And finally handle the directory itself.
|
||||
return handler(path, stat, follow)
|
||||
}
|
||||
|
||||
// handleEvent might need to emit more than one fsnotify event if the events
|
||||
// bitmap matches more than one event type (e.g. the file was both modified and
|
||||
// had the attributes changed between when the association was created and the
|
||||
// when event was returned)
|
||||
func (w *fen) handleEvent(event *unix.PortEvent) error {
|
||||
var (
|
||||
events = event.Events
|
||||
path = event.Path
|
||||
fmode = event.Cookie.(os.FileMode)
|
||||
reRegister = true
|
||||
)
|
||||
|
||||
w.mu.Lock()
|
||||
_, watchedDir := w.dirs[path]
|
||||
_, watchedPath := w.watches[path]
|
||||
w.mu.Unlock()
|
||||
isWatched := watchedDir || watchedPath
|
||||
|
||||
if events&unix.FILE_DELETE != 0 {
|
||||
if !w.sendEvent(path, Remove) {
|
||||
return nil
|
||||
}
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_FROM != 0 {
|
||||
if !w.sendEvent(path, Rename) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the new file name
|
||||
reRegister = false
|
||||
}
|
||||
if events&unix.FILE_RENAME_TO != 0 {
|
||||
// We don't report a Rename event for this case, because Rename events
|
||||
// are interpreted as referring to the _old_ name of the file, and in
|
||||
// this case the event would refer to the new name of the file. This
|
||||
// type of rename event is not supported by fsnotify.
|
||||
|
||||
// inotify reports a Remove event in this case, so we simulate this
|
||||
// here.
|
||||
if !w.sendEvent(path, Remove) {
|
||||
return nil
|
||||
}
|
||||
// Don't keep watching the file that was removed
|
||||
reRegister = false
|
||||
}
|
||||
|
||||
// The file is gone, nothing left to do.
|
||||
if !reRegister {
|
||||
if watchedDir {
|
||||
w.mu.Lock()
|
||||
delete(w.dirs, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
if watchedPath {
|
||||
w.mu.Lock()
|
||||
delete(w.watches, path)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If we didn't get a deletion the file still exists and we're going to have
|
||||
// to watch it again. Let's Stat it now so that we can compare permissions
|
||||
// and have what we need to continue watching the file
|
||||
|
||||
stat, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
// This is unexpected, but we should still emit an event. This happens
|
||||
// most often on "rm -r" of a subdirectory inside a watched directory We
|
||||
// get a modify event of something happening inside, but by the time we
|
||||
// get here, the sudirectory is already gone. Clearly we were watching
|
||||
// this path but now it is gone. Let's tell the user that it was
|
||||
// removed.
|
||||
if !w.sendEvent(path, Remove) {
|
||||
return nil
|
||||
}
|
||||
// Suppress extra write events on removed directories; they are not
|
||||
// informative and can be confusing.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resolve symlinks that were explicitly watched, as we would have at Add()
// time. This helps suppress spurious Chmod events on watched symlinks.
|
||||
if isWatched {
|
||||
stat, err = os.Stat(path)
|
||||
if err != nil {
|
||||
// The symlink still exists, but the target is gone. Report the
|
||||
// Remove similar to above.
|
||||
if !w.sendEvent(path, Remove) {
|
||||
return nil
|
||||
}
|
||||
// Don't return the error
|
||||
}
|
||||
}
|
||||
|
||||
if events&unix.FILE_MODIFIED != 0 {
|
||||
if fmode.IsDir() && watchedDir {
|
||||
if err := w.updateDirectory(path); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if !w.sendEvent(path, Write) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if events&unix.FILE_ATTRIB != 0 && stat != nil {
|
||||
// Only send Chmod if perms changed
|
||||
if stat.Mode().Perm() != fmode.Perm() {
|
||||
if !w.sendEvent(path, Chmod) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if stat != nil {
|
||||
// If we get here, it means we've hit an event above that requires us to
|
||||
// continue watching the file or directory
|
||||
return w.associateFile(path, stat, isWatched)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) updateDirectory(path string) error {
|
||||
// The directory was modified, so we must find unwatched entities and watch
|
||||
// them. If something was removed from the directory, nothing will happen,
|
||||
// as everything else should still be watched.
|
||||
files, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, entry := range files {
|
||||
path := filepath.Join(path, entry.Name())
|
||||
if w.port.PathIsWatched(path) {
|
||||
continue
|
||||
}
|
||||
|
||||
finfo, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = w.associateFile(path, finfo, false)
|
||||
if !w.sendError(err) {
|
||||
return nil
|
||||
}
|
||||
if !w.sendEvent(path, Create) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
// This is primarily protecting the call to AssociatePath but it is
|
||||
// important and intentional that the call to PathIsWatched is also
|
||||
// protected by this mutex. Without this mutex, AssociatePath has been seen
|
||||
// to error out that the path is already associated.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
if w.port.PathIsWatched(path) {
|
||||
// Remove the old association in favor of this one If we get ENOENT,
|
||||
// then while the x/sys/unix wrapper still thought that this path was
|
||||
// associated, the underlying event port did not. This call will have
|
||||
// cleared up that discrepancy. The most likely cause is that the event
|
||||
// has fired but we haven't processed it yet.
|
||||
err := w.port.DissociatePath(path)
|
||||
if err != nil && !errors.Is(err, unix.ENOENT) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var events int
|
||||
if !follow {
|
||||
// Watch symlinks themselves rather than their targets unless this entry
|
||||
// is explicitly watched.
|
||||
events |= unix.FILE_NOFOLLOW
|
||||
}
|
||||
if true { // TODO: implement withOps()
|
||||
events |= unix.FILE_MODIFIED
|
||||
}
|
||||
if true {
|
||||
events |= unix.FILE_ATTRIB
|
||||
}
|
||||
return w.port.AssociatePath(path, stat, events, stat.Mode())
|
||||
}
|
||||
|
||||
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
|
||||
if !w.port.PathIsWatched(path) {
|
||||
return nil
|
||||
}
|
||||
return w.port.DissociatePath(path)
|
||||
}
|
||||
|
||||
func (w *fen) WatchList() []string {
	if w.isClosed() {
		return nil
	}

	w.mu.Lock()
	defer w.mu.Unlock()

	entries := make([]string, 0, len(w.watches)+len(w.dirs))
	for pathname := range w.dirs {
		entries = append(entries, pathname)
	}
	for pathname := range w.watches {
		entries = append(entries, pathname)
	}

	return entries
}

func (w *fen) xSupports(op Op) bool {
	if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
		op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
		return false
	}
	return true
}
|
||||
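The FEN backend above, and the inotify, kqueue, and Windows backends that follow, all sit behind the same public fsnotify `Watcher` with `Events` and `Errors` channels. fsnotify is most likely vendored here as a dependency of the configuration layer rather than used directly by this PR. For orientation, here is a minimal sketch of how application code would typically consume the library; it uses only fsnotify's documented public API, and the watched path is an illustrative placeholder, not something this PR adds.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err) // e.g. "fsnotify not supported on the current platform"
	}
	defer w.Close()

	// Watch a directory; events for entries inside it arrive on w.Events.
	if err := w.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-w.Events:
			if !ok {
				return // channel closed by Close()
			}
			if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
				log.Println("changed:", ev.Name, ev.Op)
			}
		case err, ok := <-w.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```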
658
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
generated
vendored
Normal file
@@ -0,0 +1,658 @@
//go:build linux && !appengine

package fsnotify

import (
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"
	"unsafe"

	"github.com/fsnotify/fsnotify/internal"
	"golang.org/x/sys/unix"
)
|
||||
|
||||
type inotify struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
// Store fd here as os.File.Read() will no longer return on close after
|
||||
// calling Fd(). See: https://github.com/golang/go/issues/26439
|
||||
fd int
|
||||
inotifyFile *os.File
|
||||
watches *watches
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneMu sync.Mutex
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
|
||||
// Store rename cookies in an array, with the index wrapping to 0. Almost
|
||||
// all of the time what we get is a MOVED_FROM to set the cookie and the
|
||||
// next event inotify sends will be MOVED_TO to read it. However, this is
|
||||
// not guaranteed – as described in inotify(7) – and we may get other events
|
||||
// between the two MOVED_* events (including other MOVED_* ones).
|
||||
//
|
||||
// A second issue is that moving a file outside the watched directory will
|
||||
// trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
|
||||
// read and delete it. So just storing it in a map would slowly leak memory.
|
||||
//
|
||||
// Doing it like this gives us a simple fast LRU-cache that won't allocate.
|
||||
// Ten items should be more than enough for our purpose, and a loop over
|
||||
// such a short array is faster than a map access anyway (not that it hugely
|
||||
// matters since we're talking about hundreds of ns at the most, but still).
|
||||
cookies [10]koekje
|
||||
cookieIndex uint8
|
||||
cookiesMu sync.Mutex
|
||||
}
|
||||
|
||||
type (
|
||||
watches struct {
|
||||
mu sync.RWMutex
|
||||
wd map[uint32]*watch // wd → watch
|
||||
path map[string]uint32 // pathname → wd
|
||||
}
|
||||
watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
path string // Watch path.
|
||||
recurse bool // Recursion with ./...?
|
||||
}
|
||||
koekje struct {
|
||||
cookie uint32
|
||||
path string
|
||||
}
|
||||
)
|
||||
|
||||
func newWatches() *watches {
|
||||
return &watches{
|
||||
wd: make(map[uint32]*watch),
|
||||
path: make(map[string]uint32),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) len() int {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
return len(w.wd)
|
||||
}
|
||||
|
||||
func (w *watches) add(ww *watch) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
w.wd[ww.wd] = ww
|
||||
w.path[ww.path] = ww.wd
|
||||
}
|
||||
|
||||
func (w *watches) remove(wd uint32) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch := w.wd[wd] // Could have had Remove() called. See #616.
|
||||
if watch == nil {
|
||||
return
|
||||
}
|
||||
delete(w.path, watch.path)
|
||||
delete(w.wd, wd)
|
||||
}
|
||||
|
||||
func (w *watches) removePath(path string) ([]uint32, error) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
path, recurse := recursivePath(path)
|
||||
wd, ok := w.path[path]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
|
||||
}
|
||||
|
||||
watch := w.wd[wd]
|
||||
if recurse && !watch.recurse {
|
||||
return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
|
||||
}
|
||||
|
||||
delete(w.path, path)
|
||||
delete(w.wd, wd)
|
||||
if !watch.recurse {
|
||||
return []uint32{wd}, nil
|
||||
}
|
||||
|
||||
wds := make([]uint32, 0, 8)
|
||||
wds = append(wds, wd)
|
||||
for p, rwd := range w.path {
|
||||
if filepath.HasPrefix(p, path) {
|
||||
delete(w.path, p)
|
||||
delete(w.wd, rwd)
|
||||
wds = append(wds, rwd)
|
||||
}
|
||||
}
|
||||
return wds, nil
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) *watch {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
return w.wd[w.path[path]]
|
||||
}
|
||||
|
||||
func (w *watches) byWd(wd uint32) *watch {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
return w.wd[wd]
|
||||
}
|
||||
|
||||
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
var existing *watch
|
||||
wd, ok := w.path[path]
|
||||
if ok {
|
||||
existing = w.wd[wd]
|
||||
}
|
||||
|
||||
upd, err := f(existing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if upd != nil {
|
||||
w.wd[upd.wd] = upd
|
||||
w.path[upd.path] = upd.wd
|
||||
|
||||
if upd.wd != wd {
|
||||
delete(w.wd, wd)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
return newBufferedBackend(0, ev, errs)
|
||||
}
|
||||
|
||||
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
|
||||
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
|
||||
// I/O operations won't terminate on close.
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
w := &inotify{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
fd: fd,
|
||||
inotifyFile: os.NewFile(uintptr(fd), ""),
|
||||
watches: newWatches(),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Returns true if the event was sent, or false if watcher is closed.
|
||||
func (w *inotify) sendEvent(e Event) bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Events <- e:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *inotify) sendError(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *inotify) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *inotify) Close() error {
|
||||
w.doneMu.Lock()
|
||||
if w.isClosed() {
|
||||
w.doneMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
close(w.done)
|
||||
w.doneMu.Unlock()
|
||||
|
||||
// Causes any blocking reads to return with an error, provided the file
|
||||
// still supports deadline operations.
|
||||
err := w.inotifyFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *inotify) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *inotify) AddWith(path string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), path)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
path, recurse := recursivePath(path)
|
||||
if recurse {
|
||||
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !d.IsDir() {
|
||||
if root == path {
|
||||
return fmt.Errorf("fsnotify: not a directory: %q", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send a Create event when adding new directory from a recursive
|
||||
// watch; this is for "mkdir -p one/two/three". Usually all those
|
||||
// directories will be created before we can set up watchers on the
|
||||
// subdirectories, so only "one" would be sent as a Create event and
|
||||
// not "one/two" and "one/two/three" (inotifywait -r has the same
|
||||
// problem).
|
||||
if with.sendCreate && root != path {
|
||||
w.sendEvent(Event{Name: root, Op: Create})
|
||||
}
|
||||
|
||||
return w.add(root, with, true)
|
||||
})
|
||||
}
|
||||
|
||||
return w.add(path, with, false)
|
||||
}
|
||||
|
||||
func (w *inotify) add(path string, with withOpts, recurse bool) error {
|
||||
var flags uint32
|
||||
if with.noFollow {
|
||||
flags |= unix.IN_DONT_FOLLOW
|
||||
}
|
||||
if with.op.Has(Create) {
|
||||
flags |= unix.IN_CREATE
|
||||
}
|
||||
if with.op.Has(Write) {
|
||||
flags |= unix.IN_MODIFY
|
||||
}
|
||||
if with.op.Has(Remove) {
|
||||
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
}
|
||||
if with.op.Has(Rename) {
|
||||
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
|
||||
}
|
||||
if with.op.Has(Chmod) {
|
||||
flags |= unix.IN_ATTRIB
|
||||
}
|
||||
if with.op.Has(xUnportableOpen) {
|
||||
flags |= unix.IN_OPEN
|
||||
}
|
||||
if with.op.Has(xUnportableRead) {
|
||||
flags |= unix.IN_ACCESS
|
||||
}
|
||||
if with.op.Has(xUnportableCloseWrite) {
|
||||
flags |= unix.IN_CLOSE_WRITE
|
||||
}
|
||||
if with.op.Has(xUnportableCloseRead) {
|
||||
flags |= unix.IN_CLOSE_NOWRITE
|
||||
}
|
||||
return w.register(path, flags, recurse)
|
||||
}
|
||||
|
||||
func (w *inotify) register(path string, flags uint32, recurse bool) error {
|
||||
return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
|
||||
if existing != nil {
|
||||
flags |= existing.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
|
||||
wd, err := unix.InotifyAddWatch(w.fd, path, flags)
|
||||
if wd == -1 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if existing == nil {
|
||||
return &watch{
|
||||
wd: uint32(wd),
|
||||
path: path,
|
||||
flags: flags,
|
||||
recurse: recurse,
|
||||
}, nil
|
||||
}
|
||||
|
||||
existing.wd = uint32(wd)
|
||||
existing.flags = flags
|
||||
return existing, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (w *inotify) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
return w.remove(filepath.Clean(name))
|
||||
}
|
||||
|
||||
func (w *inotify) remove(name string) error {
|
||||
wds, err := w.watches.removePath(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, wd := range wds {
|
||||
_, err := unix.InotifyRmWatch(w.fd, wd)
|
||||
if err != nil {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every
|
||||
// case; the only two possible errors are:
|
||||
//
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of
|
||||
// any kind.
|
||||
//
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is
|
||||
// not a valid watch descriptor. Watch descriptors are invalidated
|
||||
// when they are removed explicitly or implicitly; explicitly by
|
||||
// inotify_rm_watch, implicitly when the file they are watching is
|
||||
// deleted.
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *inotify) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
entries := make([]string, 0, w.watches.len())
|
||||
w.watches.mu.RLock()
|
||||
for pathname := range w.watches.path {
|
||||
entries = append(entries, pathname)
|
||||
}
|
||||
w.watches.mu.RUnlock()
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *inotify) readEvents() {
|
||||
defer func() {
|
||||
close(w.doneResp)
|
||||
close(w.Errors)
|
||||
close(w.Events)
|
||||
}()
|
||||
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
errno error // Syscall errno
|
||||
)
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := w.inotifyFile.Read(buf[:])
|
||||
switch {
|
||||
case errors.Unwrap(err) == os.ErrClosed:
|
||||
return
|
||||
case err != nil:
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
if n == 0 {
|
||||
err = io.EOF // If EOF is received. This should really never happen.
|
||||
} else if n < 0 {
|
||||
err = errno // If an error occurred while reading.
|
||||
} else {
|
||||
err = errors.New("notify: short read in readEvents()") // Read was too short.
|
||||
}
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
var offset uint32
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
var (
|
||||
// Point "raw" to the event in the buffer
|
||||
raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
mask = uint32(raw.Mask)
|
||||
nameLen = uint32(raw.Len)
|
||||
// Move to the next event in the buffer
|
||||
next = func() { offset += unix.SizeofInotifyEvent + nameLen }
|
||||
)
|
||||
|
||||
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
if !w.sendError(ErrEventOverflow) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
/// If the event happened to the watched directory or the watched
|
||||
/// file, the kernel doesn't append the filename to the event, but
|
||||
/// we would like to always fill the "Name" field with a valid
|
||||
/// filename. We retrieve the path of the watch from the "paths"
|
||||
/// map.
|
||||
watch := w.watches.byWd(uint32(raw.Wd))
|
||||
/// Can be nil if Remove() was called in another goroutine for this
|
||||
/// path in between reading the events from the kernel and reading
|
||||
/// the internal state. Not much we can do about it, so just skip.
|
||||
/// See #616.
|
||||
if watch == nil {
|
||||
next()
|
||||
continue
|
||||
}
|
||||
|
||||
name := watch.path
|
||||
if nameLen > 0 {
|
||||
/// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
|
||||
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
if debug {
|
||||
internal.Debug(name, raw.Mask, raw.Cookie)
|
||||
}
|
||||
|
||||
if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0
|
||||
next()
|
||||
continue
|
||||
}
|
||||
|
||||
// inotify will automatically remove the watch on deletes; just need
|
||||
// to clean our state here.
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
w.watches.remove(watch.wd)
|
||||
}
|
||||
|
||||
// We can't really update the state when a watched path is moved;
|
||||
// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
|
||||
// the watch.
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
|
||||
if watch.recurse {
|
||||
next() // Do nothing
|
||||
continue
|
||||
}
|
||||
|
||||
err := w.remove(watch.path)
|
||||
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Skip if we're watching both this path and the parent; the parent
|
||||
/// will already send a delete so no need to do it twice.
|
||||
if mask&unix.IN_DELETE_SELF != 0 {
|
||||
if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok {
|
||||
next()
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
ev := w.newEvent(name, mask, raw.Cookie)
|
||||
// Need to update watch path for recurse.
|
||||
if watch.recurse {
|
||||
isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR
|
||||
/// New directory created: set up watch on it.
|
||||
if isDir && ev.Has(Create) {
|
||||
err := w.register(ev.Name, watch.flags, true)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
|
||||
// This was a directory rename, so we need to update all
|
||||
// the children.
|
||||
//
|
||||
// TODO: this is of course pretty slow; we should use a
|
||||
// better data structure for storing all of this, e.g. store
|
||||
// children in the watch. I have some code for this in my
|
||||
// kqueue refactor we can use in the future. For now I'm
|
||||
// okay with this as it's not publicly available.
|
||||
// Correctness first, performance second.
|
||||
if ev.renamedFrom != "" {
|
||||
w.watches.mu.Lock()
|
||||
for k, ww := range w.watches.wd {
|
||||
if k == watch.wd || ww.path == ev.Name {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(ww.path, ev.renamedFrom) {
|
||||
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
|
||||
w.watches.wd[k] = ww
|
||||
}
|
||||
}
|
||||
w.watches.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Send the events that are not ignored on the events channel
|
||||
if !w.sendEvent(ev) {
|
||||
return
|
||||
}
|
||||
next()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *inotify) isRecursive(path string) bool {
|
||||
ww := w.watches.byPath(path)
|
||||
if ww == nil { // path could be a file, so also check the Dir.
|
||||
ww = w.watches.byPath(filepath.Dir(path))
|
||||
}
|
||||
return ww != nil && ww.recurse
|
||||
}
|
||||
|
||||
func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_OPEN == unix.IN_OPEN {
|
||||
e.Op |= xUnportableOpen
|
||||
}
|
||||
if mask&unix.IN_ACCESS == unix.IN_ACCESS {
|
||||
e.Op |= xUnportableRead
|
||||
}
|
||||
if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
|
||||
e.Op |= xUnportableCloseWrite
|
||||
}
|
||||
if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
|
||||
e.Op |= xUnportableCloseRead
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
|
||||
if cookie != 0 {
|
||||
if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
w.cookiesMu.Lock()
|
||||
w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
|
||||
w.cookieIndex++
|
||||
if w.cookieIndex > 9 {
|
||||
w.cookieIndex = 0
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
} else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
w.cookiesMu.Lock()
|
||||
var prev string
|
||||
for _, c := range w.cookies {
|
||||
if c.cookie == cookie {
|
||||
prev = c.path
|
||||
break
|
||||
}
|
||||
}
|
||||
w.cookiesMu.Unlock()
|
||||
e.renamedFrom = prev
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func (w *inotify) xSupports(op Op) bool {
|
||||
return true // Supports everything.
|
||||
}
|
||||
|
||||
func (w *inotify) state() {
|
||||
w.watches.mu.Lock()
|
||||
defer w.watches.mu.Unlock()
|
||||
for wd, ww := range w.watches.wd {
|
||||
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
|
||||
}
|
||||
}
|
||||
733
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
generated
vendored
Normal file
@@ -0,0 +1,733 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin

package fsnotify

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"time"

	"github.com/fsnotify/fsnotify/internal"
	"golang.org/x/sys/unix"
)
|
||||
|
||||
type kqueue struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
closepipe [2]int // Pipe used for closing kq.
|
||||
watches *watches
|
||||
done chan struct{}
|
||||
doneMu sync.Mutex
|
||||
}
|
||||
|
||||
type (
|
||||
watches struct {
|
||||
mu sync.RWMutex
|
||||
wd map[int]watch // wd → watch
|
||||
path map[string]int // pathname → wd
|
||||
byDir map[string]map[int]struct{} // dirname(path) → wd
|
||||
seen map[string]struct{} // Keep track of if we know this file exists.
|
||||
byUser map[string]struct{} // Watches added with Watcher.Add()
|
||||
}
|
||||
watch struct {
|
||||
wd int
|
||||
name string
|
||||
linkName string // In case of links; name is the target, and this is the link.
|
||||
isDir bool
|
||||
dirFlags uint32
|
||||
}
|
||||
)
|
||||
|
||||
func newWatches() *watches {
|
||||
return &watches{
|
||||
wd: make(map[int]watch),
|
||||
path: make(map[string]int),
|
||||
byDir: make(map[string]map[int]struct{}),
|
||||
seen: make(map[string]struct{}),
|
||||
byUser: make(map[string]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) listPaths(userOnly bool) []string {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
if userOnly {
|
||||
l := make([]string, 0, len(w.byUser))
|
||||
for p := range w.byUser {
|
||||
l = append(l, p)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
l := make([]string, 0, len(w.path))
|
||||
for p := range w.path {
|
||||
l = append(l, p)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (w *watches) watchesInDir(path string) []string {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
l := make([]string, 0, 4)
|
||||
for fd := range w.byDir[path] {
|
||||
info := w.wd[fd]
|
||||
if _, ok := w.byUser[info.name]; !ok {
|
||||
l = append(l, info.name)
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// Mark path as added by the user.
|
||||
func (w *watches) addUserWatch(path string) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
w.byUser[path] = struct{}{}
|
||||
}
|
||||
|
||||
func (w *watches) addLink(path string, fd int) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
w.path[path] = fd
|
||||
w.seen[path] = struct{}{}
|
||||
}
|
||||
|
||||
func (w *watches) add(path, linkPath string, fd int, isDir bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
w.path[path] = fd
|
||||
w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir}
|
||||
|
||||
parent := filepath.Dir(path)
|
||||
byDir, ok := w.byDir[parent]
|
||||
if !ok {
|
||||
byDir = make(map[int]struct{}, 1)
|
||||
w.byDir[parent] = byDir
|
||||
}
|
||||
byDir[fd] = struct{}{}
|
||||
}
|
||||
|
||||
func (w *watches) byWd(fd int) (watch, bool) {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
info, ok := w.wd[fd]
|
||||
return info, ok
|
||||
}
|
||||
|
||||
func (w *watches) byPath(path string) (watch, bool) {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
info, ok := w.wd[w.path[path]]
|
||||
return info, ok
|
||||
}
|
||||
|
||||
func (w *watches) updateDirFlags(path string, flags uint32) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
fd := w.path[path]
|
||||
info := w.wd[fd]
|
||||
info.dirFlags = flags
|
||||
w.wd[fd] = info
|
||||
}
|
||||
|
||||
func (w *watches) remove(fd int, path string) bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
isDir := w.wd[fd].isDir
|
||||
delete(w.path, path)
|
||||
delete(w.byUser, path)
|
||||
|
||||
parent := filepath.Dir(path)
|
||||
delete(w.byDir[parent], fd)
|
||||
|
||||
if len(w.byDir[parent]) == 0 {
|
||||
delete(w.byDir, parent)
|
||||
}
|
||||
|
||||
delete(w.wd, fd)
|
||||
delete(w.seen, path)
|
||||
return isDir
|
||||
}
|
||||
|
||||
func (w *watches) markSeen(path string, exists bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
if exists {
|
||||
w.seen[path] = struct{}{}
|
||||
} else {
|
||||
delete(w.seen, path)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *watches) seenBefore(path string) bool {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
_, ok := w.seen[path]
|
||||
return ok
|
||||
}
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
return newBufferedBackend(0, ev, errs)
|
||||
}
|
||||
|
||||
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
|
||||
kq, closepipe, err := newKqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &kqueue{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
kq: kq,
|
||||
closepipe: closepipe,
|
||||
done: make(chan struct{}),
|
||||
watches: newWatches(),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// newKqueue creates a new kernel event queue and returns a descriptor.
|
||||
//
|
||||
// This registers a new event on closepipe, which will trigger an event when
|
||||
// it's closed. This way we can use kevent() without timeout/polling; without
|
||||
// the closepipe, it would block forever and we wouldn't be able to stop it at
|
||||
// all.
|
||||
func newKqueue() (kq int, closepipe [2]int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if kq == -1 {
|
||||
return kq, closepipe, err
|
||||
}
|
||||
|
||||
// Register the close pipe.
|
||||
err = unix.Pipe(closepipe[:])
|
||||
if err != nil {
|
||||
unix.Close(kq)
|
||||
return kq, closepipe, err
|
||||
}
|
||||
unix.CloseOnExec(closepipe[0])
|
||||
unix.CloseOnExec(closepipe[1])
|
||||
|
||||
// Register changes to listen on the closepipe.
|
||||
changes := make([]unix.Kevent_t, 1)
|
||||
// SetKevent converts int to the platform-specific types.
|
||||
unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
|
||||
unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
|
||||
|
||||
ok, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if ok == -1 {
|
||||
unix.Close(kq)
|
||||
unix.Close(closepipe[0])
|
||||
unix.Close(closepipe[1])
|
||||
return kq, closepipe, err
|
||||
}
|
||||
return kq, closepipe, nil
|
||||
}
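The closepipe registration above is the usual self-pipe wake-up trick: kevent() blocks until the read end of the pipe becomes readable, which happens as soon as Close() closes the write end. The standalone sketch below illustrates the same pattern with a plain os.Pipe; it is an illustration only, not code from fsnotify.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	done := make(chan struct{})
	go func() {
		buf := make([]byte, 1)
		// Blocks until data arrives or the write end is closed (EOF).
		r.Read(buf)
		close(done)
	}()

	// The "quit message": closing the write end unblocks the reader.
	w.Close()
	<-done
	fmt.Println("reader woke up")
}
```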
|
||||
|
||||
// Returns true if the event was sent, or false if watcher is closed.
|
||||
func (w *kqueue) sendEvent(e Event) bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Events <- e:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *kqueue) sendError(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case <-w.done:
|
||||
return false
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *kqueue) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *kqueue) Close() error {
|
||||
w.doneMu.Lock()
|
||||
if w.isClosed() {
|
||||
w.doneMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
close(w.done)
|
||||
w.doneMu.Unlock()
|
||||
|
||||
pathsToRemove := w.watches.listPaths(false)
|
||||
for _, name := range pathsToRemove {
|
||||
w.Remove(name)
|
||||
}
|
||||
|
||||
// Send "quit" message to the reader goroutine.
|
||||
unix.Close(w.closepipe[1])
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *kqueue) AddWith(name string, opts ...addOpt) error {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
|
||||
_, err := w.addWatch(name, noteAllEvents)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.watches.addUserWatch(name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) Remove(name string) error {
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), name)
|
||||
}
|
||||
return w.remove(name, true)
|
||||
}
|
||||
|
||||
func (w *kqueue) remove(name string, unwatchFiles bool) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
name = filepath.Clean(name)
|
||||
info, ok := w.watches.byPath(name)
|
||||
if !ok {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
|
||||
}
|
||||
|
||||
err := w.register([]int{info.wd}, unix.EV_DELETE, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(info.wd)
|
||||
|
||||
isDir := w.watches.remove(info.wd, name)
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if unwatchFiles && isDir {
|
||||
pathsToRemove := w.watches.watchesInDir(name)
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error to
|
||||
// the user, as that will just confuse them with an error about a
|
||||
// path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
return w.watches.listPaths(true)
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// addWatch adds name to the watched file set; the flags are interpreted as
|
||||
// described in kevent(2).
|
||||
//
|
||||
// Returns the real path to the file which was added, with symlinks resolved.
|
||||
func (w *kqueue) addWatch(name string, flags uint32) (string, error) {
|
||||
if w.isClosed() {
|
||||
return "", ErrClosed
|
||||
}
|
||||
|
||||
name = filepath.Clean(name)
|
||||
|
||||
info, alreadyWatching := w.watches.byPath(name)
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets or named pipes.
|
||||
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow symlinks.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
link, err := os.Readlink(name)
|
||||
if err != nil {
|
||||
// Return nil because Linux can add unresolvable symlinks to the
|
||||
// watch list without problems, so maintain consistency with
|
||||
// that. There will be no file events for broken symlinks.
|
||||
// TODO: more specific check; returns os.PathError; ENOENT?
|
||||
return "", nil
|
||||
}
|
||||
|
||||
_, alreadyWatching = w.watches.byPath(link)
|
||||
if alreadyWatching {
|
||||
// Add to watches so we don't get spurious Create events later
|
||||
// on when we diff the directories.
|
||||
w.watches.addLink(name, 0)
|
||||
return link, nil
|
||||
}
|
||||
|
||||
info.linkName = name
|
||||
name = link
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
// Retry on EINTR; open() can return EINTR in practice on macOS.
|
||||
// See #354, and Go issues 11180 and 39237.
|
||||
for {
|
||||
info.wd, err = unix.Open(name, openMode, 0)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if errors.Is(err, unix.EINTR) {
|
||||
continue
|
||||
}
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
info.isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
|
||||
if err != nil {
|
||||
unix.Close(info.wd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.watches.add(name, info.linkName, info.wd, info.isDir)
|
||||
}
|
||||
|
||||
// Watch the directory if it has not been watched before, or if it was
|
||||
// watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
if info.isDir {
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
w.watches.updateDirFlags(name, flags)
|
||||
|
||||
if watchDir {
|
||||
if err := w.watchDirectoryFiles(name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *kqueue) readEvents() {
|
||||
defer func() {
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
_ = unix.Close(w.kq)
|
||||
unix.Close(w.closepipe[0])
|
||||
}()
|
||||
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
for {
|
||||
kevents, err := w.read(eventBuffer)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, kevent := range kevents {
|
||||
var (
|
||||
wd = int(kevent.Ident)
|
||||
mask = uint32(kevent.Fflags)
|
||||
)
|
||||
|
||||
// Shut down the loop when the pipe is closed, but only after all
|
||||
// other events have been processed.
|
||||
if wd == w.closepipe[0] {
|
||||
return
|
||||
}
|
||||
|
||||
path, ok := w.watches.byWd(wd)
|
||||
if debug {
|
||||
internal.Debug(path.name, &kevent)
|
||||
}
|
||||
|
||||
// On macOS it seems that sometimes an event with Ident=0 is
|
||||
// delivered, and no other flags/information beyond that, even
|
||||
// though we never saw such a file descriptor. For example in
|
||||
// TestWatchSymlink/277 (usually at the end, but sometimes sooner):
|
||||
//
|
||||
// fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
|
||||
// unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
|
||||
// unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
|
||||
//
|
||||
// The first is a normal event, the second with Ident 0. No error
|
||||
// flag, no data, no ... nothing.
|
||||
//
|
||||
// I read a bit through bsd/kern_event.c from the xnu source, but I
|
||||
// don't really see an obvious location where this is triggered –
|
||||
// this doesn't seem intentional, but idk...
|
||||
//
|
||||
// Technically fd 0 is a valid descriptor, so only skip it if
|
||||
// there's no path, and if we're on macOS.
|
||||
if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
|
||||
continue
|
||||
}
|
||||
|
||||
event := w.newEvent(path.name, path.linkName, mask)
|
||||
|
||||
if event.Has(Rename) || event.Has(Remove) {
|
||||
w.remove(event.Name, false)
|
||||
w.watches.markSeen(event.Name, false)
|
||||
}
|
||||
|
||||
if path.isDir && event.Has(Write) && !event.Has(Remove) {
|
||||
w.dirChange(event.Name)
|
||||
} else if !w.sendEvent(event) {
|
||||
return
|
||||
}
|
||||
|
||||
if event.Has(Remove) {
|
||||
// Look for a file that may have overwritten this; for example,
|
||||
// mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
_, found := w.watches.byPath(fileDir)
|
||||
if found {
|
||||
// TODO: this branch is never triggered in any test.
|
||||
// Added in d6220df (2012).
|
||||
// isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
|
||||
//
|
||||
// I don't really get how this can be triggered either.
|
||||
// And it wasn't triggered in the patch that added it,
|
||||
// either.
|
||||
//
|
||||
// Original also had a comment:
|
||||
// make sure the directory exists before we watch for
|
||||
// changes. When we do a recursive watch and perform
|
||||
// rm -rf, the parent directory might have gone
|
||||
// missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the
|
||||
// parent directory.
|
||||
err := w.dirChange(fileDir)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
path := filepath.Clean(event.Name)
|
||||
if fi, err := os.Lstat(path); err == nil {
|
||||
err := w.sendCreateIfNew(path, fi)
|
||||
if !w.sendError(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on kqueue Fflags.
|
||||
func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if linkName != "" {
|
||||
// If the user watched "/path/link" then emit events as "/path/link"
|
||||
// rather than "/path/target".
|
||||
e.Name = linkName
|
||||
}
|
||||
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
// No point sending a write and delete event at the same time: if it's gone,
|
||||
// then it's gone.
|
||||
if e.Op.Has(Write) && e.Op.Has(Remove) {
|
||||
e.Op &^= Write
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *kqueue) watchDirectoryFiles(dirPath string) error {
|
||||
files, err := os.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
path := filepath.Join(dirPath, f.Name())
|
||||
|
||||
fi, err := f.Info()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%q: %w", path, err)
|
||||
}
|
||||
|
||||
cleanPath, err := w.internalWatch(path, fi)
|
||||
if err != nil {
|
||||
// No permission to read the file; that's not a problem: just skip.
|
||||
// But do mark it as seen to prevent it from being picked up
|
||||
// as a "new" file later (it still shows up in the directory
|
||||
// listing).
|
||||
switch {
|
||||
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
|
||||
cleanPath = filepath.Clean(path)
|
||||
default:
|
||||
return fmt.Errorf("%q: %w", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
w.watches.markSeen(cleanPath, true)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Search the directory for new files and send an event for them.
|
||||
//
|
||||
// This functionality is to have the BSD watcher match the inotify, which sends
|
||||
// a create event for files created in a watched directory.
|
||||
func (w *kqueue) dirChange(dir string) error {
|
||||
files, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
// Directory no longer exists: we can ignore this safely. kqueue will
|
||||
// still give us the correct events.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.dirChange: %w", err)
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
fi, err := f.Info()
|
||||
if err != nil {
|
||||
return fmt.Errorf("fsnotify.dirChange: %w", err)
|
||||
}
|
||||
|
||||
err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
|
||||
if err != nil {
|
||||
// Don't need to send an error if this file isn't readable.
|
||||
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("fsnotify.dirChange: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send a create event if the file isn't already being tracked, and start
|
||||
// watching this file.
|
||||
func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error {
|
||||
if !w.watches.seenBefore(path) {
|
||||
if !w.sendEvent(Event{Name: path, Op: Create}) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Like watchDirectoryFiles, but without doing another ReadDir.
|
||||
path, err := w.internalWatch(path, fi)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.watches.markSeen(path, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
|
||||
if fi.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories, but preserve
|
||||
// the flags used if currently watching subdirectory
|
||||
info, _ := w.watches.byPath(name)
|
||||
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME)
|
||||
}
|
||||
|
||||
// watch file to mimic Linux inotify
|
||||
return w.addWatch(name, noteAllEvents)
|
||||
}
|
||||
|
||||
// Register events with the queue.
|
||||
func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types.
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// Register the events.
|
||||
success, err := unix.Kevent(w.kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(w.kq, nil, events, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
func (w *kqueue) xSupports(op Op) bool {
|
||||
if runtime.GOOS == "freebsd" {
|
||||
//return true // Supports everything.
|
||||
}
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
23
vendor/github.com/fsnotify/fsnotify/backend_other.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)

package fsnotify

import "errors"

type other struct {
	Events chan Event
	Errors chan error
}

func newBackend(ev chan Event, errs chan error) (backend, error) {
	return nil, errors.New("fsnotify not supported on the current platform")
}
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
	return newBackend(ev, errs)
}
func (w *other) Close() error                              { return nil }
func (w *other) WatchList() []string                       { return nil }
func (w *other) Add(name string) error                     { return nil }
func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
func (w *other) Remove(name string) error                  { return nil }
func (w *other) xSupports(op Op) bool                      { return false }
|
||||
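backend_other.go is the fallback compiled on platforms with no native watcher: constructing a watcher fails cleanly instead of panicking. A caller that wants to keep running without file watching can treat that error as non-fatal; a brief hedged sketch (the log-and-continue behaviour is an illustrative choice, not something this PR implements):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		// On platforms handled by backend_other.go this is
		// "fsnotify not supported on the current platform".
		log.Printf("file watching disabled: %v", err)
		return
	}
	defer w.Close()
	// ... consume w.Events and w.Errors as usual ...
}
```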
682
vendor/github.com/fsnotify/fsnotify/backend_windows.go
generated
vendored
Normal file
@@ -0,0 +1,682 @@
//go:build windows

// Windows backend based on ReadDirectoryChangesW()
//
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw

package fsnotify

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"time"
	"unsafe"

	"github.com/fsnotify/fsnotify/internal"
	"golang.org/x/sys/windows"
)
|
||||
|
||||
type readDirChangesW struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
|
||||
port windows.Handle // Handle to completion port
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
|
||||
mu sync.Mutex // Protects access to watches, closed
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
closed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
func newBackend(ev chan Event, errs chan error) (backend, error) {
|
||||
return newBufferedBackend(50, ev, errs)
|
||||
}
|
||||
|
||||
func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) {
|
||||
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
w := &readDirChangesW{
|
||||
Events: ev,
|
||||
Errors: errs,
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) isClosed() bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.closed
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
event := w.newEvent(name, uint32(mask))
|
||||
event.renamedFrom = renamedFrom
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if the error was sent, or false if watcher is closed.
|
||||
func (w *readDirChangesW) sendError(err error) bool {
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
return true
|
||||
case <-w.quit:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.closed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
|
||||
|
||||
func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
|
||||
if w.isClosed() {
|
||||
return ErrClosed
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
with := getOptions(opts...)
|
||||
if !w.xSupports(with.op) {
|
||||
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
|
||||
}
|
||||
if with.bufsize < 4096 {
|
||||
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
|
||||
}
|
||||
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
bufsize: with.bufsize,
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
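AddWith rejects buffers under 4096 bytes because that is the smallest useful ReadDirectoryChangesW buffer. Callers watching very busy directories on Windows can raise the per-watch buffer through the exported option named in the error message above; a minimal sketch (the 64 KiB value and the path are arbitrary examples, not settings used by this PR):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// 64 KiB is an arbitrary example; the backend rejects anything below 4096.
	// Other platforms ignore the buffer-size option.
	if err := w.AddWith(`C:\some\busy\dir`, fsnotify.WithBufferSize(64<<10)); err != nil {
		log.Fatal(err)
	}
}
```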
|
||||
|
||||
func (w *readDirChangesW) Remove(name string) error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
if debug {
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
|
||||
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
|
||||
}
|
||||
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) WatchList() []string {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
entries := make([]string, 0, len(w.watches))
|
||||
for _, entry := range w.watches {
|
||||
for _, watchEntry := range entry {
|
||||
for name := range watchEntry.names {
|
||||
entries = append(entries, filepath.Join(watchEntry.path, name))
|
||||
}
|
||||
// the directory itself is being watched
|
||||
if watchEntry.mask != 0 {
|
||||
entries = append(entries, watchEntry.path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
// These options are from the old golang.org/x/exp/winfsnotify, where you could
|
||||
// add various options to the watch. This has long since been removed.
|
||||
//
|
||||
// The "sys" in the name is misleading as they're not part of any "system".
|
||||
//
|
||||
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
|
||||
const (
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
sysFSMODIFY = 0x2
|
||||
sysFSMOVE = 0xc0
|
||||
sysFSMOVEDFROM = 0x40
|
||||
sysFSMOVEDTO = 0x80
|
||||
sysFSMOVESELF = 0x800
|
||||
sysFSIGNORED = 0x8000
|
||||
)
|
||||
|
||||
func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
bufsize int
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle windows.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov windows.Overlapped
|
||||
ino *inode // i-number
|
||||
recurse bool // Recursive watch?
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf []byte // buffer, allocated later
|
||||
}
|
||||
|
||||
type (
|
||||
indexMap map[uint64]*watch
|
||||
watchMap map[uint32]indexMap
|
||||
)
|
||||
|
||||
func (w *readDirChangesW) wakeupReader() error {
|
||||
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if err != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
|
||||
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
|
||||
if err != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", err)
|
||||
}
|
||||
if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
|
||||
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
|
||||
windows.FILE_LIST_DIRECTORY,
|
||||
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
|
||||
nil, windows.OPEN_EXISTING,
|
||||
windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
|
||||
if err != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", err)
|
||||
}
|
||||
|
||||
var fi windows.ByHandleFileInformation
|
||||
err = windows.GetFileInformationByHandle(h, &fi)
|
||||
if err != nil {
|
||||
windows.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", err)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ino, err := w.getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
|
||||
if err != nil {
|
||||
windows.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", err)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
recurse: recurse,
|
||||
buf: make([]byte, bufsize),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
windows.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
|
||||
err = w.startRead(watchEntry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) remWatch(pathname string) error {
|
||||
pathname, recurse := recursivePath(pathname)
|
||||
|
||||
dir, err := w.getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := w.getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
|
||||
if recurse && !watch.recurse {
|
||||
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
|
||||
}
|
||||
|
||||
err = windows.CloseHandle(ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
}
|
||||
if watch == nil {
|
||||
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *readDirChangesW) startRead(watch *watch) error {
|
||||
err := windows.CancelIo(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CancelIo", err))
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := w.toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= w.toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
err := windows.CloseHandle(watch.ino.handle)
|
||||
if err != nil {
|
||||
w.sendError(os.NewSyscallError("CloseHandle", err))
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// We need to pass the array, rather than the slice.
|
||||
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
|
||||
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
|
||||
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
|
||||
watch.recurse, mask, nil, &watch.ov, 0)
|
||||
if rdErr != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
|
||||
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *readDirChangesW) readEvents() {
|
||||
var (
|
||||
n uint32
|
||||
key uintptr
|
||||
ov *windows.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
// This error is handled after the watch == nil check below.
|
||||
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
|
||||
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
|
||||
err := windows.CloseHandle(w.port)
|
||||
if err != nil {
|
||||
err = os.NewSyscallError("CloseHandle", err)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch qErr {
|
||||
case nil:
|
||||
// No error
|
||||
case windows.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case windows.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case windows.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr))
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.sendError(ErrEventOverflow)
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
|
||||
// Create a buf that is the size of the path name
|
||||
size := int(raw.FileNameLength / 2)
|
||||
var buf []uint16
|
||||
// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
|
||||
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
|
||||
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
|
||||
sh.Len = size
|
||||
sh.Cap = size
|
||||
name := windows.UTF16ToString(buf)
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
if debug {
|
||||
internal.Debug(fullname, raw.Action)
|
||||
}
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case windows.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
// Update saved path of all sub-watches.
|
||||
old := filepath.Join(watch.path, watch.rename)
|
||||
w.mu.Lock()
|
||||
for _, watchMap := range w.watches {
|
||||
for _, ww := range watchMap {
|
||||
if strings.HasPrefix(ww.path, old) {
|
||||
ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(fullname, "", watch.names[name]&mask)
|
||||
}
|
||||
if raw.Action == windows.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
|
||||
if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
} else {
|
||||
w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
|
||||
}
|
||||
|
||||
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
//lint:ignore ST1005 Windows should be capitalized
|
||||
w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.sendError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case windows.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case windows.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case windows.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case windows.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case windows.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (w *readDirChangesW) xSupports(op Op) bool {
|
||||
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
|
||||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
494  vendor/github.com/fsnotify/fsnotify/fsnotify.go  (generated, vendored, new file)
@@ -0,0 +1,494 @@
|
||||
// Package fsnotify provides a cross-platform interface for file system
// notifications.
//
// Currently supported systems:
//
//   - Linux      via inotify
//   - BSD, macOS via kqueue
//   - Windows    via ReadDirectoryChangesW
//   - illumos    via FEN
//
// # FSNOTIFY_DEBUG
//
// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
// stderr. This can be useful to track down some problems, especially in cases
// where fsnotify is used as an indirect dependency.
//
// Every event will be printed as soon as there's something useful to print,
// with as little processing from fsnotify.
//
// Example output:
//
//	FSNOTIFY_DEBUG: 11:34:23.633087586   256:IN_CREATE → "/tmp/file-1"
//	FSNOTIFY_DEBUG: 11:34:23.633202319     4:IN_ATTRIB → "/tmp/file-1"
//	FSNOTIFY_DEBUG: 11:34:28.989728764   512:IN_DELETE → "/tmp/file-1"
package fsnotify

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)
|
||||
|
||||
// Watcher watches a set of paths, delivering events on a channel.
|
||||
//
|
||||
// A watcher should not be copied (e.g. pass it by pointer, rather than by
|
||||
// value).
|
||||
//
|
||||
// # Linux notes
|
||||
//
|
||||
// When a file is removed a Remove event won't be emitted until all file
|
||||
// descriptors are closed, and deletes will always emit a Chmod. For example:
|
||||
//
|
||||
// fp := os.Open("file")
|
||||
// os.Remove("file") // Triggers Chmod
|
||||
// fp.Close() // Triggers Remove
|
||||
//
|
||||
// This is the event that inotify sends, so not much can be changed about this.
|
||||
//
|
||||
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
|
||||
// for the number of watches per user, and fs.inotify.max_user_instances
|
||||
// specifies the maximum number of inotify instances per user. Every Watcher you
|
||||
// create is an "instance", and every path you add is a "watch".
|
||||
//
|
||||
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
|
||||
// /proc/sys/fs/inotify/max_user_instances
|
||||
//
|
||||
// To increase them you can use sysctl or write the value to the /proc file:
|
||||
//
|
||||
// # Default values on Linux 5.18
|
||||
// sysctl fs.inotify.max_user_watches=124983
|
||||
// sysctl fs.inotify.max_user_instances=128
|
||||
//
|
||||
// To make the changes persist on reboot edit /etc/sysctl.conf or
|
||||
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
|
||||
// your distro's documentation):
|
||||
//
|
||||
// fs.inotify.max_user_watches=124983
|
||||
// fs.inotify.max_user_instances=128
|
||||
//
|
||||
// Reaching the limit will result in a "no space left on device" or "too many open
|
||||
// files" error.
|
||||
//
|
||||
// # kqueue notes (macOS, BSD)
|
||||
//
|
||||
// kqueue requires opening a file descriptor for every file that's being watched;
|
||||
// so if you're watching a directory with five files then that's six file
|
||||
// descriptors. You will run in to your system's "max open files" limit faster on
|
||||
// these platforms.
|
||||
//
|
||||
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
|
||||
// control the maximum number of open files, as well as /etc/login.conf on BSD
|
||||
// systems.
|
||||
//
|
||||
// # Windows notes
|
||||
//
|
||||
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
|
||||
// ("C:/path/to/dir") will also work.
|
||||
//
|
||||
// When a watched directory is removed it will always send an event for the
|
||||
// directory itself, but may not send events for all files in that directory.
|
||||
// Sometimes it will send events for all files, sometimes it will send no
|
||||
// events, and often only for some files.
|
||||
//
|
||||
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
|
||||
// value that is guaranteed to work with SMB filesystems. If you have many
|
||||
// events in quick succession this may not be enough, and you will have to use
|
||||
// [WithBufferSize] to increase the value.
|
||||
type Watcher struct {
|
||||
b backend
|
||||
|
||||
// Events sends the filesystem change events.
|
||||
//
|
||||
// fsnotify can send the following events; a "path" here can refer to a
|
||||
// file, directory, symbolic link, or special file like a FIFO.
|
||||
//
|
||||
// fsnotify.Create A new path was created; this may be followed by one
|
||||
// or more Write events if data also gets written to a
|
||||
// file.
|
||||
//
|
||||
// fsnotify.Remove A path was removed.
|
||||
//
|
||||
// fsnotify.Rename A path was renamed. A rename is always sent with the
|
||||
// old path as Event.Name, and a Create event will be
|
||||
// sent with the new name. Renames are only sent for
|
||||
// paths that are currently watched; e.g. moving an
|
||||
// unmonitored file into a monitored directory will
|
||||
// show up as just a Create. Similarly, renaming a file
|
||||
// to outside a monitored directory will show up as
|
||||
// only a Rename.
|
||||
//
|
||||
// fsnotify.Write A file or named pipe was written to. A Truncate will
|
||||
// also trigger a Write. A single "write action"
|
||||
// initiated by the user may show up as one or multiple
|
||||
// writes, depending on when the system syncs things to
|
||||
// disk. For example when compiling a large Go program
|
||||
// you may get hundreds of Write events, and you may
|
||||
// want to wait until you've stopped receiving them
|
||||
// (see the dedup example in cmd/fsnotify).
|
||||
//
|
||||
// Some systems may send Write event for directories
|
||||
// when the directory content changes.
|
||||
//
|
||||
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
|
||||
// when a file is removed (or more accurately, when a
|
||||
// link to an inode is removed). On kqueue it's sent
|
||||
// when a file is truncated. On Windows it's never
|
||||
// sent.
|
||||
Events chan Event
|
||||
|
||||
// Errors sends any errors.
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// Event represents a file system notification.
|
||||
type Event struct {
|
||||
// Path to the file or directory.
|
||||
//
|
||||
// Paths are relative to the input; for example with Add("dir") the Name
|
||||
// will be set to "dir/file" if you create that file, but if you use
|
||||
// Add("/path/to/dir") it will be "/path/to/dir/file".
|
||||
Name string
|
||||
|
||||
// File operation that triggered the event.
|
||||
//
|
||||
// This is a bitmask and some systems may send multiple operations at once.
|
||||
// Use the Event.Has() method instead of comparing with ==.
|
||||
Op Op
|
||||
|
||||
// Create events will have this set to the old path if it's a rename. This
|
||||
// only works when both the source and destination are watched. It's not
|
||||
// reliable when watching individual files, only directories.
|
||||
//
|
||||
// For example "mv /tmp/file /tmp/rename" will emit:
|
||||
//
|
||||
// Event{Op: Rename, Name: "/tmp/file"}
|
||||
// Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
|
||||
renamedFrom string
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
type Op uint32
|
||||
|
||||
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
|
||||
// full description, and check them with [Event.Has].
|
||||
const (
|
||||
// A new pathname was created.
|
||||
Create Op = 1 << iota
|
||||
|
||||
// The pathname was written to; this does *not* mean the write has finished,
|
||||
// and a write can be followed by more writes.
|
||||
Write
|
||||
|
||||
// The path was removed; any watches on it will be removed. Some "remove"
|
||||
// operations may trigger a Rename if the file is actually moved (for
|
||||
// example "remove to trash" is often a rename).
|
||||
Remove
|
||||
|
||||
// The path was renamed to something else; any watches on it will be
|
||||
// removed.
|
||||
Rename
|
||||
|
||||
// File attributes were changed.
|
||||
//
|
||||
// It's generally not recommended to take action on this event, as it may
|
||||
// get triggered very frequently by some software. For example, Spotlight
|
||||
// indexing on macOS, anti-virus software, backup software, etc.
|
||||
Chmod
|
||||
|
||||
// File descriptor was opened.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableOpen
|
||||
|
||||
// File was read from.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableRead
|
||||
|
||||
// File opened for writing was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
//
|
||||
// The advantage of using this over Write is that it's more reliable than
|
||||
// waiting for Write events to stop. It's also faster (if you're not
|
||||
// listening to Write events): copying a file of a few GB can easily
|
||||
// generate tens of thousands of Write events in a short span of time.
|
||||
xUnportableCloseWrite
|
||||
|
||||
// File opened for reading was closed.
|
||||
//
|
||||
// Only works on Linux and FreeBSD.
|
||||
xUnportableCloseRead
|
||||
)
|
||||
|
||||
var (
	// ErrNonExistentWatch is used when Remove() is called on a path that's not
	// added.
	ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")

	// ErrClosed is used when trying to operate on a closed Watcher.
	ErrClosed = errors.New("fsnotify: watcher already closed")

	// ErrEventOverflow is reported from the Errors channel when there are too
	// many events:
	//
	//   - inotify:     inotify returns IN_Q_OVERFLOW – because there are too
	//                  many queued events (the fs.inotify.max_queued_events
	//                  sysctl can be used to increase this).
	//   - windows:     The buffer size is too small; WithBufferSize() can be used to increase it.
	//   - kqueue, fen: Not used.
	ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")

	// ErrUnsupported is returned by AddWith() when WithOps() specified an
	// Unportable event that's not supported on this platform.
	xErrUnsupported = errors.New("fsnotify: not supported with this backend")
)
|
||||
|
||||
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
	ev, errs := make(chan Event), make(chan error)
	b, err := newBackend(ev, errs)
	if err != nil {
		return nil, err
	}
	return &Watcher{b: b, Events: ev, Errors: errs}, nil
}

// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
	ev, errs := make(chan Event), make(chan error)
	b, err := newBufferedBackend(sz, ev, errs)
	if err != nil {
		return nil, err
	}
	return &Watcher{b: b, Events: ev, Errors: errs}, nil
}
|
||||
|
||||
// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically: it
// will write to a temporary file which is then moved to destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(path string) error { return w.b.Add(path) }
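// A minimal usage sketch of the API above (the directory path and logging are
// placeholder assumptions, not part of this package): watch one directory and
// react to Write events.
//
//	w, err := fsnotify.NewWatcher()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer w.Close()
//
//	if err := w.Add("/tmp/dir"); err != nil {
//		log.Fatal(err)
//	}
//
//	for {
//		select {
//		case ev, ok := <-w.Events:
//			if !ok {
//				return
//			}
//			if ev.Has(fsnotify.Write) {
//				log.Println("modified:", ev.Name)
//			}
//		case err, ok := <-w.Errors:
//			if !ok {
//				return
//			}
//			log.Println("error:", err)
//		}
//	}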
|
||||
|
||||
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
//   - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
//     other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
|
||||
|
||||
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }

// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error { return w.b.Close() }

// WatchList returns all paths explicitly added with [Watcher.Add] (and are not
// yet removed).
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { return w.b.WatchList() }

// Supports reports if all the listed operations are supported by this platform.
//
// Create, Write, Remove, Rename, and Chmod are always supported. It can only
// return false for an Op starting with Unportable.
func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
|
||||
|
||||
func (o Op) String() string {
|
||||
var b strings.Builder
|
||||
if o.Has(Create) {
|
||||
b.WriteString("|CREATE")
|
||||
}
|
||||
if o.Has(Remove) {
|
||||
b.WriteString("|REMOVE")
|
||||
}
|
||||
if o.Has(Write) {
|
||||
b.WriteString("|WRITE")
|
||||
}
|
||||
if o.Has(xUnportableOpen) {
|
||||
b.WriteString("|OPEN")
|
||||
}
|
||||
if o.Has(xUnportableRead) {
|
||||
b.WriteString("|READ")
|
||||
}
|
||||
if o.Has(xUnportableCloseWrite) {
|
||||
b.WriteString("|CLOSE_WRITE")
|
||||
}
|
||||
if o.Has(xUnportableCloseRead) {
|
||||
b.WriteString("|CLOSE_READ")
|
||||
}
|
||||
if o.Has(Rename) {
|
||||
b.WriteString("|RENAME")
|
||||
}
|
||||
if o.Has(Chmod) {
|
||||
b.WriteString("|CHMOD")
|
||||
}
|
||||
if b.Len() == 0 {
|
||||
return "[no events]"
|
||||
}
|
||||
return b.String()[1:]
|
||||
}
|
||||
|
||||
// Has reports if this operation has the given operation.
|
||||
func (o Op) Has(h Op) bool { return o&h != 0 }
|
||||
|
||||
// Has reports if this event has the given operation.
|
||||
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
|
||||
|
||||
// String returns a string representation of the event with their path.
|
||||
func (e Event) String() string {
|
||||
if e.renamedFrom != "" {
|
||||
return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
|
||||
}
|
||||
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
|
||||
}
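// Since Op is a bitmask, a single Event can carry several operations; check
// them with Has rather than ==. A small sketch with made-up values:
//
//	e := fsnotify.Event{Name: "/tmp/file", Op: fsnotify.Create | fsnotify.Write}
//	e.Has(fsnotify.Create) // true
//	e.Has(fsnotify.Remove) // false
//	fmt.Println(e)         // prints roughly: CREATE|WRITE "/tmp/file"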
|
||||
|
||||
type (
|
||||
backend interface {
|
||||
Add(string) error
|
||||
AddWith(string, ...addOpt) error
|
||||
Remove(string) error
|
||||
WatchList() []string
|
||||
Close() error
|
||||
xSupports(Op) bool
|
||||
}
|
||||
addOpt func(opt *withOpts)
|
||||
withOpts struct {
|
||||
bufsize int
|
||||
op Op
|
||||
noFollow bool
|
||||
sendCreate bool
|
||||
}
|
||||
)
|
||||
|
||||
var debug = func() bool {
|
||||
// Check for exactly "1" (rather than mere existence) so we can add
|
||||
// options/flags in the future. I don't know if we ever want that, but it's
|
||||
// nice to leave the option open.
|
||||
return os.Getenv("FSNOTIFY_DEBUG") == "1"
|
||||
}()
|
||||
|
||||
var defaultOpts = withOpts{
|
||||
bufsize: 65536, // 64K
|
||||
op: Create | Write | Remove | Rename | Chmod,
|
||||
}
|
||||
|
||||
func getOptions(opts ...addOpt) withOpts {
|
||||
with := defaultOpts
|
||||
for _, o := range opts {
|
||||
if o != nil {
|
||||
o(&with)
|
||||
}
|
||||
}
|
||||
return with
|
||||
}
|
||||
|
||||
// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
//
// This only has effect on Windows systems, and is a no-op for other backends.
//
// The default value is 64K (65536 bytes) which is the highest value that works
// on all filesystems and should be enough for most applications, but if you
// have a large burst of events it may not be enough. You can increase it if
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
//
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
func WithBufferSize(bytes int) addOpt {
	return func(opt *withOpts) { opt.bufsize = bytes }
}
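// A short sketch of raising the buffer on Windows (the 256 KiB figure and the
// path are only example values; the option is a no-op on other platforms):
//
//	w, err := fsnotify.NewWatcher()
//	if err != nil {
//		log.Fatal(err)
//	}
//	err = w.AddWith(`C:\path\to\dir`, fsnotify.WithBufferSize(256*1024))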
|
||||
|
||||
// WithOps sets which operations to listen for. The default is [Create],
|
||||
// [Write], [Remove], [Rename], and [Chmod].
|
||||
//
|
||||
// Excluding operations you're not interested in can save quite a bit of CPU
|
||||
// time; in some use cases there may be hundreds of thousands of useless Write
|
||||
// or Chmod operations per second.
|
||||
//
|
||||
// This can also be used to add unportable operations not supported by all
|
||||
// platforms; unportable operations all start with "Unportable":
|
||||
// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
|
||||
// [UnportableCloseRead].
|
||||
//
|
||||
// AddWith returns an error when using an unportable operation that's not
|
||||
// supported. Use [Watcher.Support] to check for support.
|
||||
func withOps(op Op) addOpt {
|
||||
return func(opt *withOpts) { opt.op = op }
|
||||
}
|
||||
|
||||
// WithNoFollow disables following symlinks, so the symlinks themselves are
|
||||
// watched.
|
||||
func withNoFollow() addOpt {
|
||||
return func(opt *withOpts) { opt.noFollow = true }
|
||||
}
|
||||
|
||||
// "Internal" option for recursive watches on inotify.
|
||||
func withCreate() addOpt {
|
||||
return func(opt *withOpts) { opt.sendCreate = true }
|
||||
}
|
||||
|
||||
var enableRecurse = false

// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
func recursivePath(path string) (string, bool) {
	path = filepath.Clean(path)
	if !enableRecurse { // Only enabled in tests for now.
		return path, false
	}
	if filepath.Base(path) == "..." {
		return filepath.Dir(path), true
	}
	return path, false
}
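// Behaviour sketch with example paths (the recursive form is only honoured
// when enableRecurse is set, which is currently test-only):
//
//	recursivePath("/tmp/dir/...") // → "/tmp/dir", true
//	recursivePath("/tmp/dir")     // → "/tmp/dir", false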
|
||||
39  vendor/github.com/fsnotify/fsnotify/internal/darwin.go  (generated, vendored, new file)
@@ -0,0 +1,39 @@
|
||||
//go:build darwin
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
SyscallEACCES = syscall.EACCES
|
||||
UnixEACCES = unix.EACCES
|
||||
)
|
||||
|
||||
var maxfiles uint64
|
||||
|
||||
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||
func SetRlimit() {
|
||||
var l syscall.Rlimit
|
||||
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
if err == nil && l.Cur != l.Max {
|
||||
l.Cur = l.Max
|
||||
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
}
|
||||
maxfiles = l.Cur
|
||||
|
||||
if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
|
||||
maxfiles = uint64(n)
|
||||
}
|
||||
|
||||
if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
|
||||
maxfiles = uint64(n)
|
||||
}
|
||||
}
|
||||
|
||||
func Maxfiles() uint64 { return maxfiles }
|
||||
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
|
||||
57  vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go  (generated, vendored, new file)
@@ -0,0 +1,57 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_CRITICAL", unix.NOTE_CRITICAL},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
|
||||
{"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
|
||||
{"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
|
||||
{"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
|
||||
{"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
|
||||
{"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
|
||||
{"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
|
||||
{"NOTE_LEEWAY", unix.NOTE_LEEWAY},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_MACHTIME", unix.NOTE_MACHTIME},
|
||||
{"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
|
||||
{"NOTE_NONE", unix.NOTE_NONE},
|
||||
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||
{"NOTE_OOB", unix.NOTE_OOB},
|
||||
//{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_REAP", unix.NOTE_REAP},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||
{"NOTE_SIGNAL", unix.NOTE_SIGNAL},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||
{"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
|
||||
{"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
|
||||
{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
|
||||
{"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
33  vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go  (generated, vendored, new file)
@@ -0,0 +1,33 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_OOB", unix.NOTE_OOB},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
42  vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go  (generated, vendored, new file)
@@ -0,0 +1,42 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ABSTIME", unix.NOTE_ABSTIME},
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_CLOSE", unix.NOTE_CLOSE},
|
||||
{"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FFAND", unix.NOTE_FFAND},
|
||||
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
|
||||
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
|
||||
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
|
||||
{"NOTE_FFNOP", unix.NOTE_FFNOP},
|
||||
{"NOTE_FFOR", unix.NOTE_FFOR},
|
||||
{"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_MSECONDS", unix.NOTE_MSECONDS},
|
||||
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
|
||||
{"NOTE_OPEN", unix.NOTE_OPEN},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_READ", unix.NOTE_READ},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_SECONDS", unix.NOTE_SECONDS},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
|
||||
{"NOTE_USECONDS", unix.NOTE_USECONDS},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
32  vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go  (generated, vendored, new file)
@@ -0,0 +1,32 @@
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, kevent *unix.Kevent_t) {
|
||||
mask := uint32(kevent.Fflags)
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
|
||||
}
|
||||
56  vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go  (generated, vendored, new file)
@@ -0,0 +1,56 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, mask, cookie uint32) {
|
||||
names := []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"IN_ACCESS", unix.IN_ACCESS},
|
||||
{"IN_ATTRIB", unix.IN_ATTRIB},
|
||||
{"IN_CLOSE", unix.IN_CLOSE},
|
||||
{"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
|
||||
{"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
|
||||
{"IN_CREATE", unix.IN_CREATE},
|
||||
{"IN_DELETE", unix.IN_DELETE},
|
||||
{"IN_DELETE_SELF", unix.IN_DELETE_SELF},
|
||||
{"IN_IGNORED", unix.IN_IGNORED},
|
||||
{"IN_ISDIR", unix.IN_ISDIR},
|
||||
{"IN_MODIFY", unix.IN_MODIFY},
|
||||
{"IN_MOVE", unix.IN_MOVE},
|
||||
{"IN_MOVED_FROM", unix.IN_MOVED_FROM},
|
||||
{"IN_MOVED_TO", unix.IN_MOVED_TO},
|
||||
{"IN_MOVE_SELF", unix.IN_MOVE_SELF},
|
||||
{"IN_OPEN", unix.IN_OPEN},
|
||||
{"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
|
||||
{"IN_UNMOUNT", unix.IN_UNMOUNT},
|
||||
}
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
var c string
|
||||
if cookie > 0 {
|
||||
c = fmt.Sprintf("(cookie: %d) ", cookie)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
|
||||
time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
|
||||
}
|
||||
25  vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go  (generated, vendored, new file)
@@ -0,0 +1,25 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
28  vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go  (generated, vendored, new file)
@@ -0,0 +1,28 @@
|
||||
package internal
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
var names = []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
|
||||
// {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
|
||||
{"NOTE_CHILD", unix.NOTE_CHILD},
|
||||
{"NOTE_DELETE", unix.NOTE_DELETE},
|
||||
{"NOTE_EOF", unix.NOTE_EOF},
|
||||
{"NOTE_EXEC", unix.NOTE_EXEC},
|
||||
{"NOTE_EXIT", unix.NOTE_EXIT},
|
||||
{"NOTE_EXTEND", unix.NOTE_EXTEND},
|
||||
{"NOTE_FORK", unix.NOTE_FORK},
|
||||
{"NOTE_LINK", unix.NOTE_LINK},
|
||||
{"NOTE_LOWAT", unix.NOTE_LOWAT},
|
||||
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
|
||||
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
|
||||
{"NOTE_RENAME", unix.NOTE_RENAME},
|
||||
{"NOTE_REVOKE", unix.NOTE_REVOKE},
|
||||
{"NOTE_TRACK", unix.NOTE_TRACK},
|
||||
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
|
||||
{"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
|
||||
{"NOTE_WRITE", unix.NOTE_WRITE},
|
||||
}
|
||||
45  vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go  (generated, vendored, new file)
@@ -0,0 +1,45 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func Debug(name string, mask int32) {
|
||||
names := []struct {
|
||||
n string
|
||||
m int32
|
||||
}{
|
||||
{"FILE_ACCESS", unix.FILE_ACCESS},
|
||||
{"FILE_MODIFIED", unix.FILE_MODIFIED},
|
||||
{"FILE_ATTRIB", unix.FILE_ATTRIB},
|
||||
{"FILE_TRUNC", unix.FILE_TRUNC},
|
||||
{"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
|
||||
{"FILE_DELETE", unix.FILE_DELETE},
|
||||
{"FILE_RENAME_TO", unix.FILE_RENAME_TO},
|
||||
{"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
|
||||
{"UNMOUNTED", unix.UNMOUNTED},
|
||||
{"MOUNTEDOVER", unix.MOUNTEDOVER},
|
||||
{"FILE_EXCEPTION", unix.FILE_EXCEPTION},
|
||||
}
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
|
||||
}
|
||||
40  vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go  (generated, vendored, new file)
@@ -0,0 +1,40 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
func Debug(name string, mask uint32) {
|
||||
names := []struct {
|
||||
n string
|
||||
m uint32
|
||||
}{
|
||||
{"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED},
|
||||
{"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED},
|
||||
{"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED},
|
||||
{"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME},
|
||||
{"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME},
|
||||
}
|
||||
|
||||
var (
|
||||
l []string
|
||||
unknown = mask
|
||||
)
|
||||
for _, n := range names {
|
||||
if mask&n.m == n.m {
|
||||
l = append(l, n.n)
|
||||
unknown ^= n.m
|
||||
}
|
||||
}
|
||||
if unknown > 0 {
|
||||
l = append(l, fmt.Sprintf("0x%x", unknown))
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n",
|
||||
time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name))
|
||||
}
|
||||
31  vendor/github.com/fsnotify/fsnotify/internal/freebsd.go  (generated, vendored, new file)
@@ -0,0 +1,31 @@
|
||||
//go:build freebsd
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
SyscallEACCES = syscall.EACCES
|
||||
UnixEACCES = unix.EACCES
|
||||
)
|
||||
|
||||
var maxfiles uint64
|
||||
|
||||
func SetRlimit() {
|
||||
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||
var l syscall.Rlimit
|
||||
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
if err == nil && l.Cur != l.Max {
|
||||
l.Cur = l.Max
|
||||
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
}
|
||||
maxfiles = uint64(l.Cur)
|
||||
}
|
||||
|
||||
func Maxfiles() uint64 { return maxfiles }
|
||||
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) }
|
||||
2  vendor/github.com/fsnotify/fsnotify/internal/internal.go  (generated, vendored, new file)
@@ -0,0 +1,2 @@
|
||||
// Package internal contains some helpers.
package internal
|
||||
31  vendor/github.com/fsnotify/fsnotify/internal/unix.go  (generated, vendored, new file)
@@ -0,0 +1,31 @@
|
||||
//go:build !windows && !darwin && !freebsd
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
SyscallEACCES = syscall.EACCES
|
||||
UnixEACCES = unix.EACCES
|
||||
)
|
||||
|
||||
var maxfiles uint64
|
||||
|
||||
func SetRlimit() {
|
||||
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
|
||||
var l syscall.Rlimit
|
||||
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
if err == nil && l.Cur != l.Max {
|
||||
l.Cur = l.Max
|
||||
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
|
||||
}
|
||||
maxfiles = uint64(l.Cur)
|
||||
}
|
||||
|
||||
func Maxfiles() uint64 { return maxfiles }
|
||||
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
|
||||
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }
|
||||
7  vendor/github.com/fsnotify/fsnotify/internal/unix2.go  (generated, vendored, new file)
@@ -0,0 +1,7 @@
|
||||
//go:build !windows

package internal

func HasPrivilegesForSymlink() bool {
	return true
}
|
||||
41  vendor/github.com/fsnotify/fsnotify/internal/windows.go  (generated, vendored, new file)
@@ -0,0 +1,41 @@
|
||||
//go:build windows
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Just a dummy.
|
||||
var (
|
||||
SyscallEACCES = errors.New("dummy")
|
||||
UnixEACCES = errors.New("dummy")
|
||||
)
|
||||
|
||||
func SetRlimit() {}
|
||||
func Maxfiles() uint64 { return 1<<64 - 1 }
|
||||
func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") }
|
||||
func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") }
|
||||
|
||||
func HasPrivilegesForSymlink() bool {
|
||||
var sid *windows.SID
|
||||
err := windows.AllocateAndInitializeSid(
|
||||
&windows.SECURITY_NT_AUTHORITY,
|
||||
2,
|
||||
windows.SECURITY_BUILTIN_DOMAIN_RID,
|
||||
windows.DOMAIN_ALIAS_RID_ADMINS,
|
||||
0, 0, 0, 0, 0, 0,
|
||||
&sid)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer windows.FreeSid(sid)
|
||||
token := windows.Token(0)
|
||||
member, err := token.IsMember(sid)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return member || token.IsElevated()
|
||||
}
|
||||
7  vendor/github.com/fsnotify/fsnotify/system_bsd.go  (generated, vendored, new file)
@@ -0,0 +1,7 @@
|
||||
//go:build freebsd || openbsd || netbsd || dragonfly

package fsnotify

import "golang.org/x/sys/unix"

const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
|
||||
8  vendor/github.com/fsnotify/fsnotify/system_darwin.go  (generated, vendored, new file)
@@ -0,0 +1,8 @@
|
||||
//go:build darwin

package fsnotify

import "golang.org/x/sys/unix"

// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
|
||||
107  vendor/github.com/go-sql-driver/mysql/.travis.yml  (generated, vendored)
@@ -1,107 +0,0 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- master
|
||||
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
before_script:
|
||||
- echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
|
||||
- sudo service mysql restart
|
||||
- .travis/wait_mysql.sh
|
||||
- mysql -e 'create database gotest;'
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- env: DB=MYSQL8
|
||||
sudo: required
|
||||
dist: trusty
|
||||
go: 1.10.x
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
- docker pull mysql:8.0
|
||||
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
|
||||
mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
|
||||
- cp .travis/docker.cnf ~/.my.cnf
|
||||
- .travis/wait_mysql.sh
|
||||
before_script:
|
||||
- export MYSQL_TEST_USER=gotest
|
||||
- export MYSQL_TEST_PASS=secret
|
||||
- export MYSQL_TEST_ADDR=127.0.0.1:3307
|
||||
- export MYSQL_TEST_CONCURRENT=1
|
||||
|
||||
- env: DB=MYSQL57
|
||||
sudo: required
|
||||
dist: trusty
|
||||
go: 1.10.x
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
- docker pull mysql:5.7
|
||||
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
|
||||
mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
|
||||
- cp .travis/docker.cnf ~/.my.cnf
|
||||
- .travis/wait_mysql.sh
|
||||
before_script:
|
||||
- export MYSQL_TEST_USER=gotest
|
||||
- export MYSQL_TEST_PASS=secret
|
||||
- export MYSQL_TEST_ADDR=127.0.0.1:3307
|
||||
- export MYSQL_TEST_CONCURRENT=1
|
||||
|
||||
- env: DB=MARIA55
|
||||
sudo: required
|
||||
dist: trusty
|
||||
go: 1.10.x
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
- docker pull mariadb:5.5
|
||||
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
|
||||
mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
|
||||
- cp .travis/docker.cnf ~/.my.cnf
|
||||
- .travis/wait_mysql.sh
|
||||
before_script:
|
||||
- export MYSQL_TEST_USER=gotest
|
||||
- export MYSQL_TEST_PASS=secret
|
||||
- export MYSQL_TEST_ADDR=127.0.0.1:3307
|
||||
- export MYSQL_TEST_CONCURRENT=1
|
||||
|
||||
- env: DB=MARIA10_1
|
||||
sudo: required
|
||||
dist: trusty
|
||||
go: 1.10.x
|
||||
services:
|
||||
- docker
|
||||
before_install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
- docker pull mariadb:10.1
|
||||
- docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
|
||||
mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
|
||||
- cp .travis/docker.cnf ~/.my.cnf
|
||||
- .travis/wait_mysql.sh
|
||||
before_script:
|
||||
- export MYSQL_TEST_USER=gotest
|
||||
- export MYSQL_TEST_PASS=secret
|
||||
- export MYSQL_TEST_ADDR=127.0.0.1:3307
|
||||
- export MYSQL_TEST_CONCURRENT=1
|
||||
|
||||
script:
|
||||
- go test -v -covermode=count -coverprofile=coverage.out
|
||||
- go vet ./...
|
||||
- .travis/gofmt.sh
|
||||
after_script:
|
||||
- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
|
||||
53  vendor/github.com/go-sql-driver/mysql/AUTHORS  (generated, vendored)
@@ -13,33 +13,52 @@
|
||||
|
||||
Aaron Hopkins <go-sql-driver at die.net>
|
||||
Achille Roussel <achille.roussel at gmail.com>
|
||||
Aidan <aidan.liu at pingcap.com>
|
||||
Alex Snast <alexsn at fb.com>
|
||||
Alexey Palazhchenko <alexey.palazhchenko at gmail.com>
|
||||
Andrew Reid <andrew.reid at tixtrack.com>
|
||||
Animesh Ray <mail.rayanimesh at gmail.com>
|
||||
Arne Hormann <arnehormann at gmail.com>
|
||||
Ariel Mashraki <ariel at mashraki.co.il>
|
||||
Asta Xie <xiemengjun at gmail.com>
|
||||
Brian Hendriks <brian at dolthub.com>
|
||||
Bulat Gaifullin <gaifullinbf at gmail.com>
|
||||
Caine Jette <jette at alum.mit.edu>
|
||||
Carlos Nieto <jose.carlos at menteslibres.net>
|
||||
Chris Kirkland <chriskirkland at github.com>
|
||||
Chris Moos <chris at tech9computers.com>
|
||||
Craig Wilson <craiggwilson at gmail.com>
|
||||
Daemonxiao <735462752 at qq.com>
|
||||
Daniel Montoya <dsmontoyam at gmail.com>
|
||||
Daniel Nichter <nil at codenode.com>
|
||||
Daniël van Eeden <git at myname.nl>
|
||||
Dave Protasowski <dprotaso at gmail.com>
|
||||
DisposaBoy <disposaboy at dby.me>
|
||||
Egor Smolyakov <egorsmkv at gmail.com>
|
||||
Erwan Martin <hello at erwan.io>
|
||||
Evan Elias <evan at skeema.net>
|
||||
Evan Shaw <evan at vendhq.com>
|
||||
Frederick Mayle <frederickmayle at gmail.com>
|
||||
Gustavo Kristic <gkristic at gmail.com>
|
||||
Gusted <postmaster at gusted.xyz>
|
||||
Hajime Nakagami <nakagami at gmail.com>
|
||||
Hanno Braun <mail at hannobraun.com>
|
||||
Henri Yandell <flamefew at gmail.com>
|
||||
Hirotaka Yamamoto <ymmt2005 at gmail.com>
|
||||
Huyiguang <hyg at webterren.com>
|
||||
ICHINOSE Shogo <shogo82148 at gmail.com>
|
||||
Ilia Cimpoes <ichimpoesh at gmail.com>
|
||||
INADA Naoki <songofacandy at gmail.com>
|
||||
Jacek Szwec <szwec.jacek at gmail.com>
|
||||
James Harr <james.harr at gmail.com>
|
||||
Janek Vedock <janekvedock at comcast.net>
|
||||
Jason Ng <oblitorum at gmail.com>
|
||||
Jean-Yves Pellé <jy at pelle.link>
|
||||
Jeff Hodges <jeff at somethingsimilar.com>
|
||||
Jeffrey Charles <jeffreycharles at gmail.com>
|
||||
Jennifer Purevsuren <jennifer at dolthub.com>
|
||||
Jerome Meyer <jxmeyer at gmail.com>
|
||||
Jiajia Zhong <zhong2plus at gmail.com>
|
||||
Jian Zhen <zhenjl at gmail.com>
|
||||
Joshua Prunier <joshua.prunier at gmail.com>
|
||||
Julien Lefevre <julien.lefevr at gmail.com>
|
||||
@@ -47,43 +66,77 @@ Julien Schmidt <go-sql-driver at julienschmidt.com>
|
||||
Justin Li <jli at j-li.net>
|
||||
Justin Nuß <nuss.justin at gmail.com>
|
||||
Kamil Dziedzic <kamil at klecza.pl>
|
||||
Kei Kamikawa <x00.x7f.x86 at gmail.com>
|
||||
Kevin Malachowski <kevin at chowski.com>
|
||||
Kieron Woodhouse <kieron.woodhouse at infosum.com>
|
||||
Lance Tian <lance6716 at gmail.com>
|
||||
Lennart Rudolph <lrudolph at hmc.edu>
|
||||
Leonardo YongUk Kim <dalinaum at gmail.com>
|
||||
Linh Tran Tuan <linhduonggnu at gmail.com>
|
||||
Lion Yang <lion at aosc.xyz>
|
||||
Luca Looz <luca.looz92 at gmail.com>
|
||||
Lucas Liu <extrafliu at gmail.com>
|
||||
Lunny Xiao <xiaolunwen at gmail.com>
|
||||
Luke Scott <luke at webconnex.com>
|
||||
Maciej Zimnoch <maciej.zimnoch at codilime.com>
|
||||
Michael Woolnough <michael.woolnough at gmail.com>
|
||||
Nathanial Murphy <nathanial.murphy at gmail.com>
|
||||
Nicola Peduzzi <thenikso at gmail.com>
|
||||
Oliver Bone <owbone at github.com>
|
||||
Olivier Mengué <dolmen at cpan.org>
|
||||
oscarzhao <oscarzhaosl at gmail.com>
|
||||
Paul Bonser <misterpib at gmail.com>
|
||||
Paulius Lozys <pauliuslozys at gmail.com>
|
||||
Peter Schultz <peter.schultz at classmarkets.com>
|
||||
Phil Porada <philporada at gmail.com>
|
||||
Rebecca Chin <rchin at pivotal.io>
|
||||
Reed Allman <rdallman10 at gmail.com>
|
||||
Richard Wilkes <wilkes at me.com>
|
||||
Robert Russell <robert at rrbrussell.com>
|
||||
Runrioter Wung <runrioter at gmail.com>
|
||||
Samantha Frank <hello at entropy.cat>
|
||||
Santhosh Kumar Tekuri <santhosh.tekuri at gmail.com>
|
||||
Sho Iizuka <sho.i518 at gmail.com>
|
||||
Sho Ikeda <suicaicoca at gmail.com>
|
||||
Shuode Li <elemount at qq.com>
|
||||
Simon J Mudd <sjmudd at pobox.com>
|
||||
Soroush Pour <me at soroushjp.com>
|
||||
Stan Putrya <root.vagner at gmail.com>
|
||||
Stanley Gunawan <gunawan.stanley at gmail.com>
|
||||
Steven Hartland <steven.hartland at multiplay.co.uk>
|
||||
Tan Jinhua <312841925 at qq.com>
|
||||
Tetsuro Aoki <t.aoki1130 at gmail.com>
|
||||
Thomas Wodarek <wodarekwebpage at gmail.com>
|
||||
Tim Ruffles <timruffles at gmail.com>
|
||||
Tom Jenkinson <tom at tjenkinson.me>
|
||||
Vladimir Kovpak <cn007b at gmail.com>
|
||||
Vladyslav Zhelezniak <zhvladi at gmail.com>
|
||||
Xiangyu Hu <xiangyu.hu at outlook.com>
|
||||
Xiaobing Jiang <s7v7nislands at gmail.com>
|
||||
Xiuming Chen <cc at cxm.cc>
|
||||
Xuehong Chan <chanxuehong at gmail.com>
|
||||
Zhang Xiang <angwerzx at 126.com>
|
||||
Zhenye Xie <xiezhenye at gmail.com>
|
||||
Zhixin Wen <john.wenzhixin at gmail.com>
|
||||
Ziheng Lyu <zihenglv at gmail.com>
|
||||
|
||||
# Organizations
|
||||
|
||||
Barracuda Networks, Inc.
|
||||
Counting Ltd.
|
||||
DigitalOcean Inc.
|
||||
Dolthub Inc.
|
||||
dyves labs AG
|
||||
Facebook Inc.
|
||||
GitHub Inc.
|
||||
Google Inc.
|
||||
InfoSum Ltd.
|
||||
Keybase Inc.
|
||||
Microsoft Corp.
|
||||
Multiplay Ltd.
|
||||
Percona LLC
|
||||
PingCAP Inc.
|
||||
Pivotal Inc.
|
||||
Shattered Silicon Ltd.
|
||||
Stripe Inc.
|
||||
Zendesk Inc.
|
||||
|
||||
136
vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
generated
vendored
@@ -1,3 +1,133 @@
## Version 1.8.1 (2024-03-26)

Bugfixes:

- fix race condition when context is canceled in [#1562](https://github.com/go-sql-driver/mysql/pull/1562) and [#1570](https://github.com/go-sql-driver/mysql/pull/1570)

## Version 1.8.0 (2024-03-09)

Major Changes:

- Use `SET NAMES charset COLLATE collation`. by @methane in [#1437](https://github.com/go-sql-driver/mysql/pull/1437)
  - Older go-mysql-driver used `collation_id` in the handshake packet. But it caused collation mismatch in some situation.
  - If you don't specify charset nor collation, go-mysql-driver sends `SET NAMES utf8mb4` for new connection. This uses server's default collation for utf8mb4.
  - If you specify charset, go-mysql-driver sends `SET NAMES <charset>`. This uses the server's default collation for `<charset>`.
  - If you specify collation and/or charset, go-mysql-driver sends `SET NAMES charset COLLATE collation`.
- PathEscape dbname in DSN. by @methane in [#1432](https://github.com/go-sql-driver/mysql/pull/1432)
  - This is backward incompatible in rare case. Check your DSN.
- Drop Go 1.13-17 support by @methane in [#1420](https://github.com/go-sql-driver/mysql/pull/1420)
  - Use Go 1.18+
- Parse numbers on text protocol too by @methane in [#1452](https://github.com/go-sql-driver/mysql/pull/1452)
  - When text protocol is used, go-mysql-driver passed bare `[]byte` to database/sql for avoid unnecessary allocation and conversion.
  - If user specified `*any` to `Scan()`, database/sql passed the `[]byte` into the target variable.
  - This confused users because most user doesn't know when text/binary protocol used.
  - go-mysql-driver 1.8 converts integer/float values into int64/double even in text protocol. This doesn't increase allocation compared to `[]byte` and conversion cost is negatable.
- New options start using the Functional Option Pattern to avoid increasing technical debt in the Config object. Future version may introduce Functional Option for existing options, but not for now.
  - Make TimeTruncate functional option by @methane in [1552](https://github.com/go-sql-driver/mysql/pull/1552)
  - Add BeforeConnect callback to configuration object by @ItalyPaleAle in [#1469](https://github.com/go-sql-driver/mysql/pull/1469)

Other changes:

- Adding DeregisterDialContext to prevent memory leaks with dialers we don't need anymore by @jypelle in https://github.com/go-sql-driver/mysql/pull/1422
- Make logger configurable per connection by @frozenbonito in https://github.com/go-sql-driver/mysql/pull/1408
- Fix ColumnType.DatabaseTypeName for mediumint unsigned by @evanelias in https://github.com/go-sql-driver/mysql/pull/1428
- Add connection attributes by @Daemonxiao in https://github.com/go-sql-driver/mysql/pull/1389
- Stop `ColumnTypeScanType()` from returning `sql.RawBytes` by @methane in https://github.com/go-sql-driver/mysql/pull/1424
- Exec() now provides access to status of multiple statements. by @mherr-google in https://github.com/go-sql-driver/mysql/pull/1309
- Allow to change (or disable) the default driver name for registration by @dolmen in https://github.com/go-sql-driver/mysql/pull/1499
- Add default connection attribute '_server_host' by @oblitorum in https://github.com/go-sql-driver/mysql/pull/1506
- QueryUnescape DSN ConnectionAttribute value by @zhangyangyu in https://github.com/go-sql-driver/mysql/pull/1470
- Add client_ed25519 authentication by @Gusted in https://github.com/go-sql-driver/mysql/pull/1518

## Version 1.7.1 (2023-04-25)

Changes:

- bump actions/checkout@v3 and actions/setup-go@v3 (#1375)
- Add go1.20 and mariadb10.11 to the testing matrix (#1403)
- Increase default maxAllowedPacket size. (#1411)

Bugfixes:

- Use SET syntax as specified in the MySQL documentation (#1402)


## Version 1.7 (2022-11-29)

Changes:

- Drop support of Go 1.12 (#1211)
- Refactoring `(*textRows).readRow` in a more clear way (#1230)
- util: Reduce boundary check in escape functions. (#1316)
- enhancement for mysqlConn handleAuthResult (#1250)

New Features:

- support Is comparison on MySQLError (#1210)
- return unsigned in database type name when necessary (#1238)
- Add API to express like a --ssl-mode=PREFERRED MySQL client (#1370)
- Add SQLState to MySQLError (#1321)

Bugfixes:

- Fix parsing 0 year. (#1257)


## Version 1.6 (2021-04-01)

Changes:

- Migrate the CI service from travis-ci to GitHub Actions (#1176, #1183, #1190)
- `NullTime` is deprecated (#960, #1144)
- Reduce allocations when building SET command (#1111)
- Performance improvement for time formatting (#1118)
- Performance improvement for time parsing (#1098, #1113)

New Features:

- Implement `driver.Validator` interface (#1106, #1174)
- Support returning `uint64` from `Valuer` in `ConvertValue` (#1143)
- Add `json.RawMessage` for converter and prepared statement (#1059)
- Interpolate `json.RawMessage` as `string` (#1058)
- Implements `CheckNamedValue` (#1090)

Bugfixes:

- Stop rounding times (#1121, #1172)
- Put zero filler into the SSL handshake packet (#1066)
- Fix checking cancelled connections back into the connection pool (#1095)
- Fix remove last 0 byte for mysql_old_password when password is empty (#1133)


## Version 1.5 (2020-01-07)

Changes:

- Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017)
- Improve buffer handling (#890)
- Document potentially insecure TLS configs (#901)
- Use a double-buffering scheme to prevent data races (#943)
- Pass uint64 values without converting them to string (#838, #955)
- Update collations and make utf8mb4 default (#877, #1054)
- Make NullTime compatible with sql.NullTime in Go 1.13+ (#995)
- Removed CloudSQL support (#993, #1007)
- Add Go Module support (#1003)

New Features:

- Implement support of optional TLS (#900)
- Check connection liveness (#934, #964, #997, #1048, #1051, #1052)
- Implement Connector Interface (#941, #958, #1020, #1035)

Bugfixes:

- Mark connections as bad on error during ping (#875)
- Mark connections as bad on error during dial (#867)
- Fix connection leak caused by rapid context cancellation (#1024)
- Mark connections as bad on error during Conn.Prepare (#1030)


## Version 1.4.1 (2018-11-14)

Bugfixes:

@@ -74,7 +204,7 @@ New Features:

- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Support for returning table alias on Columns() (#289, #359, #382)
- Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Exported ParseDSN function and the Config struct (#403, #419, #429)

@@ -118,7 +248,7 @@ Changes:

- Also exported the MySQLWarning type
- mysqlConn.Close returns the first error encountered instead of ignoring all errors
- writePacket() automatically writes the packet size to the header
- readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets
- readPacket() uses an iterative approach instead of the recursive approach to merge split packets

New Features:

@@ -166,7 +296,7 @@ Bugfixes:

- Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- Convert to DB timezone when inserting `time.Time`
- Splitted packets (more than 16MB) are now merged correctly
- Split packets (more than 16MB) are now merged correctly
- Fixed false positive `io.EOF` errors when the data was fully read
- Avoid panics on reuse of closed connections
- Fixed empty string producing false nil values
23
vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
generated
vendored
@@ -1,23 +0,0 @@
# Contributing Guidelines

## Reporting Issues

Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).

## Contributing Code

By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
Don't forget to add yourself to the AUTHORS file.

### Code Review

Everyone is invited to review and comment on pull requests.
If it looks fine to you, comment with "LGTM" (Looks good to me).

If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes.

Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".

## Development Ideas

If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
171
vendor/github.com/go-sql-driver/mysql/README.md
generated
vendored
@@ -35,20 +35,28 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
  * Supports queries larger than 16MB
  * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
  * Intelligent `LONG DATA` handling in prepared statements
  * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
  * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
  * Optional `time.Time` parsing
  * Optional placeholder interpolation

## Requirements
  * Go 1.7 or higher. We aim to support the 3 latest versions of Go.
  * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
  * Go 1.19 or higher. We aim to support the 3 latest versions of Go.
  * MySQL (5.7+) and MariaDB (10.3+) are supported.
  * [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
    * Do not ask questions about TiDB in our issue tracker or forum.
    * [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang)
    * [Forum](https://ask.pingcap.com/)
  * go-mysql would work with Percona Server, Google CloudSQL or Sphinx (2.2.3+).
    * Maintainers won't support them. Do not expect issues are investigated and resolved by maintainers.
    * Investigate issues yourself and please send a pull request to fix it.

---------------------------------------

## Installation
Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
$ go get -u github.com/go-sql-driver/mysql
go get -u github.com/go-sql-driver/mysql
```
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.

@@ -56,15 +64,37 @@ Make sure [Git is installed](https://git-scm.com/downloads) on your machine and
_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.

Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:

```go
import "database/sql"
import _ "github.com/go-sql-driver/mysql"
import (
	"database/sql"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

// ...

db, err := sql.Open("mysql", "user:password@/dbname")
if err != nil {
	panic(err)
}
// See "Important settings" section.
db.SetConnMaxLifetime(time.Minute * 3)
db.SetMaxOpenConns(10)
db.SetMaxIdleConns(10)
```

[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").

### Important settings

`db.SetConnMaxLifetime()` is required to ensure connections are closed by the driver safely before connection is closed by MySQL server, OS, or other middlewares. Since some middlewares close idle connections by 5 minutes, we recommend timeout shorter than 5 minutes. This setting helps load balancing and changing system variables too.

`db.SetMaxOpenConns()` is highly recommended to limit the number of connection used by the application. There is no recommended limit number because it depends on application and MySQL server.

`db.SetMaxIdleConns()` is recommended to be set same to `db.SetMaxOpenConns()`. When it is smaller than `SetMaxOpenConns()`, connections can be opened and closed much more frequently than you expect. Idle connections can be closed by the `db.SetConnMaxLifetime()`. If you want to close idle connections more rapidly, you can use `db.SetConnMaxIdleTime()` since Go 1.15.
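A minimal sketch of the pool settings described above, including the `db.SetConnMaxIdleTime()` call mentioned for Go 1.15+ (the numeric values are placeholders, not recommendations for every workload):

```go
// Sketch only: illustrative pool configuration for a *sql.DB.
package main

import (
	"database/sql"
	"time"
)

func configurePool(db *sql.DB) {
	db.SetConnMaxLifetime(3 * time.Minute) // recycle connections before middleware/server timeouts
	db.SetConnMaxIdleTime(time.Minute)     // Go 1.15+: close idle connections sooner
	db.SetMaxOpenConns(10)                 // cap concurrent connections to MySQL
	db.SetMaxIdleConns(10)                 // keep the idle pool equal to the open cap
}
```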
### DSN (Data Source Name)

@@ -92,6 +122,12 @@ This has the same effect as an empty DSN string:

```

`dbname` is escaped by [PathEscape()](https://pkg.go.dev/net/url#PathEscape) since v1.8.0. If your database name is `dbname/withslash`, it becomes:

```
/dbname%2Fwithslash
```

Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.

#### Password
@@ -99,7 +135,7 @@ Passwords can consist of any character. Escaping is **not** necessary.

#### Protocol
See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
In general you should use an Unix domain socket if available and TCP otherwise for best performance.
In general you should use a Unix domain socket if available and TCP otherwise for best performance.

#### Address
For TCP and UDP networks, addresses have the form `host[:port]`.

@@ -122,8 +158,8 @@ Valid Values: true, false
Default: false
```

`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
[*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)

##### `allowCleartextPasswords`

@@ -133,7 +169,18 @@ Valid Values: true, false
Default: false
```

`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](https://dev.mysql.com/doc/en/cleartext-pluggable-authentication.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.

##### `allowFallbackToPlaintext`

```
Type: bool
Valid Values: true, false
Default: false
```

`allowFallbackToPlaintext=true` acts like a `--ssl-mode=PREFERRED` MySQL client as described in [Command Options for Connecting to the Server](https://dev.mysql.com/doc/refman/5.7/en/connection-options.html#option_general_ssl-mode)

##### `allowNativePasswords`

@@ -161,23 +208,39 @@ Valid Values: <name>
Default: none
```

Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).

Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
Unless you need the fallback behavior, please use `collation` instead.
See also [Unicode Support](#unicode-support).

##### `checkConnLiveness`

```
Type: bool
Valid Values: true, false
Default: true
```

On supported platforms connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query retried with another connection.
`checkConnLiveness=false` disables this liveness check of connections.

##### `collation`

```
Type: string
Valid Values: <name>
Default: utf8_general_ci
Default: utf8mb4_general_ci
```

Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.

A list of valid charsets for a server is retrievable with `SHOW COLLATION`.

The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.

Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).

See also [Unicode Support](#unicode-support).

##### `clientFoundRows`

```
@@ -214,7 +277,7 @@ Default: false

If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`.

*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are rejected as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
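A minimal sketch of enabling `interpolateParams` through the DSN so that `?` placeholders are expanded client-side in a single round trip (the host, database, and `accounts` table are placeholders):

```go
// Sketch only: client-side placeholder interpolation via the DSN.
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?interpolateParams=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The ? placeholder is interpolated by the driver before the query is sent.
	var name string
	if err := db.QueryRow("SELECT name FROM accounts WHERE id = ?", 42).Scan(&name); err != nil {
		log.Fatal(err)
	}
	log.Println(name)
}
```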
##### `loc`

@@ -230,13 +293,22 @@ Note that this sets the location for time.Time values but does not change MySQL's

Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.

##### `timeTruncate`

```
Type: duration
Default: 0
```

[Truncate time values](https://pkg.go.dev/time#Duration.Truncate) to the specified duration. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.

##### `maxAllowedPacket`
```
Type: decimal number
Default: 4194304
Default: 64*1024*1024
```

Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
Max packet size allowed in bytes. The default value is 64 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.

##### `multiStatements`

@@ -246,9 +318,25 @@ Valid Values: true, false
Default: false
```

Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
Allow multiple statements in one query. This can be used to bach multiple queries. Use [Rows.NextResultSet()](https://pkg.go.dev/database/sql#Rows.NextResultSet) to get result of the second and subsequent queries.

When `multiStatements` is used, `?` parameters must only be used in the first statement.
When `multiStatements` is used, `?` parameters must only be used in the first statement. [interpolateParams](#interpolateparams) can be used to avoid this limitation unless prepared statement is used explicitly.

It's possible to access the last inserted ID and number of affected rows for multiple statements by using `sql.Conn.Raw()` and the `mysql.Result`. For example:

```go
conn, _ := db.Conn(ctx)
conn.Raw(func(conn any) error {
	ex := conn.(driver.Execer)
	res, err := ex.Exec(`
	UPDATE point SET x = 1 WHERE y = 2;
	UPDATE point SET x = 2 WHERE y = 3;
	`, nil)
	// Both slices have 2 elements.
	log.Print(res.(mysql.Result).AllRowsAffected())
	log.Print(res.(mysql.Result).AllLastInsertIds())
})
```

##### `parseTime`

@@ -328,11 +416,11 @@ Timeout for establishing connections, aka dial timeout. The value must be a deci

```
Type: bool / string
Valid Values: true, false, skip-verify, <name>
Valid Values: true, false, skip-verify, preferred, <name>
Default: false
```

`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
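A minimal sketch of registering a custom TLS configuration and selecting it with `tls=custom` in the DSN (the CA path, host, and config name are placeholders):

```go
// Sketch only: custom TLS config registered under the name "custom".
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"log"
	"os"

	"github.com/go-sql-driver/mysql"
)

func main() {
	pem, err := os.ReadFile("/etc/ssl/certs/my-ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		log.Fatal("failed to append CA certificate")
	}
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: pool}); err != nil {
		log.Fatal(err)
	}

	db, err := sql.Open("mysql", "user:password@tcp(db.example.com:3306)/dbname?tls=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```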
##### `writeTimeout`

@@ -344,6 +432,15 @@ Default: 0

I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.

##### `connectionAttributes`

```
Type: comma-delimited string of user-defined "key:value" pairs
Valid Values: (<name1>:<value1>,<name2>:<value2>,...)
Default: none
```

[Connection attributes](https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html) are key-value pairs that application programs can pass to the server at connect time.

##### System Variables

@@ -360,7 +457,7 @@ Rules:
Examples:
  * `autocommit=1`: `SET autocommit=1`
  * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
  * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
  * [`transaction_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation): `SET transaction_isolation='REPEATABLE-READ'`

#### Examples

@@ -391,14 +488,9 @@ TCP on a remote host, e.g. Amazon RDS:
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
```

Google Cloud SQL on App Engine (First Generation MySQL Server):
Google Cloud SQL on App Engine:
```
user@cloudsql(project-id:instance-name)/dbname
```

Google Cloud SQL on App Engine (Second Generation MySQL Server):
```
user@cloudsql(project-id:regionname:instance-name)/dbname
user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
```

TCP using default port (3306) on localhost:

@@ -421,7 +513,7 @@ user:password@/

The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.

## `ColumnType` Support
This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `MEDIUMINT`, `BIGINT`.

## `context.Context` Support
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.

@@ -434,7 +526,7 @@ For this feature you need direct access to the package. Therefore you must chang
import "github.com/go-sql-driver/mysql"
```

Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)).

To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::<name>` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
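A minimal sketch of allowlisting one local file with `mysql.RegisterLocalFile()` before running `LOAD DATA LOCAL INFILE` (file path and table name are placeholders):

```go
// Sketch only: allow exactly one local file for LOAD DATA LOCAL INFILE.
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Allow this specific file; no allowAllFiles=true needed.
	mysql.RegisterLocalFile("/tmp/accounts.csv")

	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("LOAD DATA LOCAL INFILE '/tmp/accounts.csv' INTO TABLE accounts"); err != nil {
		log.Fatal(err)
	}
}
```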
@@ -444,21 +536,21 @@ See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/my
### `time.Time` support
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.

However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.

**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).

Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
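A minimal sketch of scanning a `DATETIME` column into a `time.Time` with `parseTime=true` (the `transactions` table and `created_at` column are placeholders):

```go
// Sketch only: parseTime=true makes the driver return time.Time values.
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true&loc=Local"
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var created time.Time
	if err := db.QueryRow("SELECT created_at FROM transactions WHERE id = ?", 1).Scan(&created); err != nil {
		log.Fatal(err)
	}
	log.Println("created at:", created.Format(time.RFC3339))
}
```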
### Unicode support
Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default.

Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
Other charsets / collations can be set using the [`charset`](#charset) or [`collation`](#collation) DSN parameter.

Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
- When only the `charset` is specified, the `SET NAMES <charset>` query is sent and the server's default collation is used.
- When both the `charset` and `collation` are specified, the `SET NAMES <charset> COLLATE <collation>` query is sent.
- When only the `collation` is specified, the collation is specified in the protocol handshake and the `SET NAMES` query is not sent. This can save one roundtrip, but note that the server may ignore the specified collation silently and use the server's default charset/collation instead.

See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.

## Testing / Development
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.

@@ -466,7 +558,7 @@ To run the driver tests you may need to adjust the configuration. See the [Testi
Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated.
If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).

See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/.github/CONTRIBUTING.md) for details.

---------------------------------------

@@ -487,4 +579,3 @@ Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).

![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
19
vendor/github.com/go-sql-driver/mysql/appengine.go
generated
vendored
@@ -1,19 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// +build appengine

package mysql

import (
	"google.golang.org/appengine/cloudsql"
)

func init() {
	RegisterDial("cloudsql", cloudsql.Dial)
}
19
vendor/github.com/go-sql-driver/mysql/atomic_bool.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
//
// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

//go:build go1.19
// +build go1.19

package mysql

import "sync/atomic"

/******************************************************************************
*                               Sync utils                                    *
******************************************************************************/

type atomicBool = atomic.Bool
47
vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
//
// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

//go:build !go1.19
// +build !go1.19

package mysql

import "sync/atomic"

/******************************************************************************
*                               Sync utils                                    *
******************************************************************************/

// atomicBool is an implementation of atomic.Bool for older version of Go.
// it is a wrapper around uint32 for usage as a boolean value with
// atomic access.
type atomicBool struct {
	_     noCopy
	value uint32
}

// Load returns whether the current boolean value is true
func (ab *atomicBool) Load() bool {
	return atomic.LoadUint32(&ab.value) > 0
}

// Store sets the value of the bool regardless of the previous value
func (ab *atomicBool) Store(value bool) {
	if value {
		atomic.StoreUint32(&ab.value, 1)
	} else {
		atomic.StoreUint32(&ab.value, 0)
	}
}

// Swap sets the value of the bool and returns the old value.
func (ab *atomicBool) Swap(value bool) bool {
	if value {
		return atomic.SwapUint32(&ab.value, 1) > 0
	}
	return atomic.SwapUint32(&ab.value, 0) > 0
}
Some files were not shown because too many files have changed in this diff.