You've already forked openaccounting-server
forked from cybercinch/openaccounting-server
deps: update dependencies for GORM, Viper, and SQLite support
- Add GORM v1.25.12 with MySQL and SQLite drivers - Add Viper v1.19.0 for configuration management - Add UUID package for GORM model IDs - Update vendor directory with new dependencies - Update Go module requirements and checksums 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
6
vendor/gorm.io/driver/mysql/.gitignore
generated
vendored
Normal file
6
vendor/gorm.io/driver/mysql/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
TODO*
|
||||
documents
|
||||
coverage.txt
|
||||
_book
|
||||
.idea
|
||||
vendor
|
||||
21
vendor/gorm.io/driver/mysql/License
generated
vendored
Normal file
21
vendor/gorm.io/driver/mysql/License
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013-NOW Jinzhu <wosmvp@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
52
vendor/gorm.io/driver/mysql/README.md
generated
vendored
Normal file
52
vendor/gorm.io/driver/mysql/README.md
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
# GORM MySQL Driver
|
||||
|
||||
## Quick Start
|
||||
|
||||
```go
|
||||
import (
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// https://github.com/go-sql-driver/mysql
|
||||
dsn := "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True&loc=Local"
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
```go
|
||||
import (
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
var datetimePrecision = 2
|
||||
|
||||
db, err := gorm.Open(mysql.New(mysql.Config{
|
||||
DSN: "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True&loc=Local", // data source name, refer https://github.com/go-sql-driver/mysql#dsn-data-source-name
|
||||
DefaultStringSize: 256, // add default size for string fields, by default, will use db type `longtext` for fields without size, not a primary key, no index defined and don't have default values
|
||||
DisableDatetimePrecision: true, // disable datetime precision support, which not supported before MySQL 5.6
|
||||
DefaultDatetimePrecision: &datetimePrecision, // default datetime precision
|
||||
DontSupportRenameIndex: true, // drop & create index when rename index, rename index not supported before MySQL 5.7, MariaDB
|
||||
DontSupportRenameColumn: true, // use change when rename column, rename rename not supported before MySQL 8, MariaDB
|
||||
SkipInitializeWithVersion: false, // smart configure based on used version
|
||||
}), &gorm.Config{})
|
||||
```
|
||||
|
||||
## Customized Driver
|
||||
|
||||
```go
|
||||
import (
|
||||
_ "example.com/my_mysql_driver"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/driver/mysql"
|
||||
)
|
||||
|
||||
db, err := gorm.Open(mysql.New(mysql.Config{
|
||||
DriverName: "my_mysql_driver_name",
|
||||
DSN: "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True&loc=Local", // data source name, refer https://github.com/go-sql-driver/mysql#dsn-data-source-name
|
||||
})
|
||||
```
|
||||
|
||||
Checkout [https://gorm.io](https://gorm.io) for details.
|
||||
25
vendor/gorm.io/driver/mysql/error_translator.go
generated
vendored
Normal file
25
vendor/gorm.io/driver/mysql/error_translator.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"github.com/go-sql-driver/mysql"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// The error codes to map mysql errors to gorm errors, here is the mysql error codes reference https://dev.mysql.com/doc/mysql-errors/8.0/en/server-error-reference.html.
|
||||
var errCodes = map[uint16]error{
|
||||
1062: gorm.ErrDuplicatedKey,
|
||||
1451: gorm.ErrForeignKeyViolated,
|
||||
1452: gorm.ErrForeignKeyViolated,
|
||||
}
|
||||
|
||||
func (dialector Dialector) Translate(err error) error {
|
||||
if mysqlErr, ok := err.(*mysql.MySQLError); ok {
|
||||
if translatedErr, found := errCodes[mysqlErr.Number]; found {
|
||||
return translatedErr
|
||||
}
|
||||
return mysqlErr
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
518
vendor/gorm.io/driver/mysql/migrator.go
generated
vendored
Normal file
518
vendor/gorm.io/driver/mysql/migrator.go
generated
vendored
Normal file
@@ -0,0 +1,518 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/migrator"
|
||||
"gorm.io/gorm/schema"
|
||||
)
|
||||
|
||||
const indexSql = `
|
||||
SELECT
|
||||
TABLE_NAME,
|
||||
COLUMN_NAME,
|
||||
INDEX_NAME,
|
||||
NON_UNIQUE
|
||||
FROM
|
||||
information_schema.STATISTICS
|
||||
WHERE
|
||||
TABLE_SCHEMA = ?
|
||||
AND TABLE_NAME = ?
|
||||
ORDER BY
|
||||
INDEX_NAME,
|
||||
SEQ_IN_INDEX`
|
||||
|
||||
var typeAliasMap = map[string][]string{
|
||||
"bool": {"tinyint"},
|
||||
"tinyint": {"bool"},
|
||||
}
|
||||
|
||||
type Migrator struct {
|
||||
migrator.Migrator
|
||||
Dialector
|
||||
}
|
||||
|
||||
func (m Migrator) FullDataTypeOf(field *schema.Field) clause.Expr {
|
||||
expr := m.Migrator.FullDataTypeOf(field)
|
||||
|
||||
if value, ok := field.TagSettings["COMMENT"]; ok {
|
||||
expr.SQL += " COMMENT " + m.Dialector.Explain("?", value)
|
||||
}
|
||||
|
||||
return expr
|
||||
}
|
||||
|
||||
// MigrateColumnUnique migrate column's UNIQUE constraint.
|
||||
// In MySQL, ColumnType's Unique is affected by UniqueIndex, so we have to take care of the UniqueIndex.
|
||||
func (m Migrator) MigrateColumnUnique(value interface{}, field *schema.Field, columnType gorm.ColumnType) error {
|
||||
unique, ok := columnType.Unique()
|
||||
if !ok || field.PrimaryKey {
|
||||
return nil // skip primary key
|
||||
}
|
||||
|
||||
queryTx, execTx := m.GetQueryAndExecTx()
|
||||
return m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
// We're currently only receiving boolean values on `Unique` tag,
|
||||
// so the UniqueConstraint name is fixed
|
||||
constraint := m.DB.NamingStrategy.UniqueName(stmt.Table, field.DBName)
|
||||
if unique {
|
||||
// Clean up redundant unique indexes
|
||||
indexes, _ := queryTx.Migrator().GetIndexes(value)
|
||||
for _, index := range indexes {
|
||||
if uni, ok := index.Unique(); !ok || !uni {
|
||||
continue
|
||||
}
|
||||
if columns := index.Columns(); len(columns) != 1 || columns[0] != field.DBName {
|
||||
continue
|
||||
}
|
||||
if name := index.Name(); name == constraint || name == field.UniqueIndex {
|
||||
continue
|
||||
}
|
||||
if err := execTx.Migrator().DropIndex(value, index.Name()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
hasConstraint := queryTx.Migrator().HasConstraint(value, constraint)
|
||||
switch {
|
||||
case field.Unique && !hasConstraint:
|
||||
if field.Unique {
|
||||
if err := execTx.Migrator().CreateConstraint(value, constraint); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// field isn't Unique but ColumnType's Unique is reported by UniqueConstraint.
|
||||
case !field.Unique && hasConstraint:
|
||||
if err := execTx.Migrator().DropConstraint(value, constraint); err != nil {
|
||||
return err
|
||||
}
|
||||
if field.UniqueIndex != "" {
|
||||
if err := execTx.Migrator().CreateIndex(value, field.UniqueIndex); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if field.UniqueIndex != "" && !queryTx.Migrator().HasIndex(value, field.UniqueIndex) {
|
||||
if err := execTx.Migrator().CreateIndex(value, field.UniqueIndex); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if field.Unique {
|
||||
if err := execTx.Migrator().CreateConstraint(value, constraint); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if field.UniqueIndex != "" && !queryTx.Migrator().HasIndex(value, field.UniqueIndex) {
|
||||
if err := execTx.Migrator().CreateIndex(value, field.UniqueIndex); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (m Migrator) AddColumn(value interface{}, name string) error {
|
||||
return m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
// avoid using the same name field
|
||||
f := stmt.Schema.LookUpField(name)
|
||||
if f == nil {
|
||||
return fmt.Errorf("failed to look up field with name: %s", name)
|
||||
}
|
||||
|
||||
if !f.IgnoreMigration {
|
||||
fieldType := m.FullDataTypeOf(f)
|
||||
columnName := clause.Column{Name: f.DBName}
|
||||
values := []interface{}{m.CurrentTable(stmt), columnName, fieldType}
|
||||
var alterSql strings.Builder
|
||||
alterSql.WriteString("ALTER TABLE ? ADD ? ?")
|
||||
if f.PrimaryKey || strings.Contains(strings.ToLower(fieldType.SQL), "auto_increment") {
|
||||
alterSql.WriteString(", ADD PRIMARY KEY (?)")
|
||||
values = append(values, columnName)
|
||||
}
|
||||
return m.DB.Exec(alterSql.String(), values...).Error
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (m Migrator) AlterColumn(value interface{}, field string) error {
|
||||
return m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
if stmt.Schema != nil {
|
||||
if field := stmt.Schema.LookUpField(field); field != nil {
|
||||
fullDataType := m.FullDataTypeOf(field)
|
||||
if m.Dialector.DontSupportRenameColumnUnique {
|
||||
fullDataType.SQL = strings.Replace(fullDataType.SQL, " UNIQUE ", " ", 1)
|
||||
}
|
||||
|
||||
return m.DB.Exec(
|
||||
"ALTER TABLE ? MODIFY COLUMN ? ?",
|
||||
m.CurrentTable(stmt), clause.Column{Name: field.DBName}, fullDataType,
|
||||
).Error
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("failed to look up field with name: %s", field)
|
||||
})
|
||||
}
|
||||
|
||||
func (m Migrator) TiDBVersion() (isTiDB bool, major, minor, patch int, err error) {
|
||||
// TiDB version string looks like:
|
||||
// "5.7.25-TiDB-v6.5.0" or "5.7.25-TiDB-v6.4.0-serverless"
|
||||
tidbVersionArray := strings.Split(m.Dialector.ServerVersion, "-")
|
||||
if len(tidbVersionArray) < 3 || tidbVersionArray[1] != "TiDB" {
|
||||
// It isn't TiDB
|
||||
return
|
||||
}
|
||||
|
||||
rawVersion := strings.TrimPrefix(tidbVersionArray[2], "v")
|
||||
realVersionArray := strings.Split(rawVersion, ".")
|
||||
if major, err = strconv.Atoi(realVersionArray[0]); err != nil {
|
||||
err = fmt.Errorf("failed to parse the version of TiDB, the major version is: %s", realVersionArray[0])
|
||||
return
|
||||
}
|
||||
|
||||
if minor, err = strconv.Atoi(realVersionArray[1]); err != nil {
|
||||
err = fmt.Errorf("failed to parse the version of TiDB, the minor version is: %s", realVersionArray[1])
|
||||
return
|
||||
}
|
||||
|
||||
if patch, err = strconv.Atoi(realVersionArray[2]); err != nil {
|
||||
err = fmt.Errorf("failed to parse the version of TiDB, the patch version is: %s", realVersionArray[2])
|
||||
return
|
||||
}
|
||||
|
||||
isTiDB = true
|
||||
return
|
||||
}
|
||||
|
||||
func (m Migrator) RenameColumn(value interface{}, oldName, newName string) error {
|
||||
return m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
if !m.Dialector.DontSupportRenameColumn {
|
||||
return m.Migrator.RenameColumn(value, oldName, newName)
|
||||
}
|
||||
|
||||
var field *schema.Field
|
||||
if stmt.Schema != nil {
|
||||
if f := stmt.Schema.LookUpField(oldName); f != nil {
|
||||
oldName = f.DBName
|
||||
field = f
|
||||
}
|
||||
|
||||
if f := stmt.Schema.LookUpField(newName); f != nil {
|
||||
newName = f.DBName
|
||||
field = f
|
||||
}
|
||||
}
|
||||
|
||||
if field != nil {
|
||||
return m.DB.Exec(
|
||||
"ALTER TABLE ? CHANGE ? ? ?",
|
||||
m.CurrentTable(stmt), clause.Column{Name: oldName},
|
||||
clause.Column{Name: newName}, m.FullDataTypeOf(field),
|
||||
).Error
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to look up field with name: %s", newName)
|
||||
})
|
||||
}
|
||||
|
||||
func (m Migrator) DropConstraint(value interface{}, name string) error {
|
||||
if !m.Dialector.Config.DontSupportDropConstraint {
|
||||
return m.Migrator.DropConstraint(value, name)
|
||||
}
|
||||
|
||||
return m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
constraint, table := m.GuessConstraintInterfaceAndTable(stmt, name)
|
||||
if constraint != nil {
|
||||
name = constraint.GetName()
|
||||
switch constraint.(type) {
|
||||
case *schema.Constraint:
|
||||
return m.DB.Exec("ALTER TABLE ? DROP FOREIGN KEY ?", clause.Table{Name: table}, clause.Column{Name: name}).Error
|
||||
case *schema.CheckConstraint:
|
||||
return m.DB.Exec("ALTER TABLE ? DROP CHECK ?", clause.Table{Name: table}, clause.Column{Name: name}).Error
|
||||
}
|
||||
}
|
||||
if m.HasIndex(value, name) {
|
||||
return m.DB.Exec("ALTER TABLE ? DROP INDEX ?", clause.Table{Name: table}, clause.Column{Name: name}).Error
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (m Migrator) RenameIndex(value interface{}, oldName, newName string) error {
|
||||
if !m.Dialector.DontSupportRenameIndex {
|
||||
return m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
return m.DB.Exec(
|
||||
"ALTER TABLE ? RENAME INDEX ? TO ?",
|
||||
m.CurrentTable(stmt), clause.Column{Name: oldName}, clause.Column{Name: newName},
|
||||
).Error
|
||||
})
|
||||
}
|
||||
|
||||
return m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
err := m.DropIndex(value, oldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if stmt.Schema != nil {
|
||||
if idx := stmt.Schema.LookIndex(newName); idx == nil {
|
||||
if idx = stmt.Schema.LookIndex(oldName); idx != nil {
|
||||
opts := m.BuildIndexOptions(idx.Fields, stmt)
|
||||
values := []interface{}{clause.Column{Name: newName}, m.CurrentTable(stmt), opts}
|
||||
|
||||
createIndexSQL := "CREATE "
|
||||
if idx.Class != "" {
|
||||
createIndexSQL += idx.Class + " "
|
||||
}
|
||||
createIndexSQL += "INDEX ? ON ??"
|
||||
|
||||
if idx.Type != "" {
|
||||
createIndexSQL += " USING " + idx.Type
|
||||
}
|
||||
|
||||
return m.DB.Exec(createIndexSQL, values...).Error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return m.CreateIndex(value, newName)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func (m Migrator) DropTable(values ...interface{}) error {
|
||||
values = m.ReorderModels(values, false)
|
||||
return m.DB.Connection(func(tx *gorm.DB) error {
|
||||
tx.Exec("SET FOREIGN_KEY_CHECKS = 0;")
|
||||
for i := len(values) - 1; i >= 0; i-- {
|
||||
if err := m.RunWithValue(values[i], func(stmt *gorm.Statement) error {
|
||||
return tx.Exec("DROP TABLE IF EXISTS ? CASCADE", m.CurrentTable(stmt)).Error
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return tx.Exec("SET FOREIGN_KEY_CHECKS = 1;").Error
|
||||
})
|
||||
}
|
||||
|
||||
// ColumnTypes column types return columnTypes,error
|
||||
func (m Migrator) ColumnTypes(value interface{}) ([]gorm.ColumnType, error) {
|
||||
columnTypes := make([]gorm.ColumnType, 0)
|
||||
err := m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
var (
|
||||
currentDatabase, table = m.CurrentSchema(stmt, stmt.Table)
|
||||
columnTypeSQL = "SELECT column_name, column_default, is_nullable = 'YES', data_type, character_maximum_length, column_type, column_key, extra, column_comment, numeric_precision, numeric_scale "
|
||||
rows, err = m.DB.Session(&gorm.Session{}).Table(table).Limit(1).Rows()
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rawColumnTypes, err := rows.ColumnTypes()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := rows.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !m.DisableDatetimePrecision {
|
||||
columnTypeSQL += ", datetime_precision "
|
||||
}
|
||||
columnTypeSQL += "FROM information_schema.columns WHERE table_schema = ? AND table_name = ? ORDER BY ORDINAL_POSITION"
|
||||
|
||||
columns, rowErr := m.DB.Table(table).Raw(columnTypeSQL, currentDatabase, table).Rows()
|
||||
if rowErr != nil {
|
||||
return rowErr
|
||||
}
|
||||
|
||||
defer columns.Close()
|
||||
|
||||
for columns.Next() {
|
||||
var (
|
||||
column migrator.ColumnType
|
||||
datetimePrecision sql.NullInt64
|
||||
extraValue sql.NullString
|
||||
columnKey sql.NullString
|
||||
values = []interface{}{
|
||||
&column.NameValue, &column.DefaultValueValue, &column.NullableValue, &column.DataTypeValue, &column.LengthValue, &column.ColumnTypeValue, &columnKey, &extraValue, &column.CommentValue, &column.DecimalSizeValue, &column.ScaleValue,
|
||||
}
|
||||
)
|
||||
|
||||
if !m.DisableDatetimePrecision {
|
||||
values = append(values, &datetimePrecision)
|
||||
}
|
||||
|
||||
if scanErr := columns.Scan(values...); scanErr != nil {
|
||||
return scanErr
|
||||
}
|
||||
|
||||
column.PrimaryKeyValue = sql.NullBool{Bool: false, Valid: true}
|
||||
column.UniqueValue = sql.NullBool{Bool: false, Valid: true}
|
||||
switch columnKey.String {
|
||||
case "PRI":
|
||||
column.PrimaryKeyValue = sql.NullBool{Bool: true, Valid: true}
|
||||
case "UNI":
|
||||
column.UniqueValue = sql.NullBool{Bool: true, Valid: true}
|
||||
}
|
||||
|
||||
if strings.Contains(extraValue.String, "auto_increment") {
|
||||
column.AutoIncrementValue = sql.NullBool{Bool: true, Valid: true}
|
||||
}
|
||||
|
||||
// only trim paired single-quotes
|
||||
s := column.DefaultValueValue.String
|
||||
for (len(s) >= 3 && s[0] == '\'' && s[len(s)-1] == '\'' && s[len(s)-2] != '\\') ||
|
||||
(len(s) == 2 && s == "''") {
|
||||
s = s[1 : len(s)-1]
|
||||
}
|
||||
column.DefaultValueValue.String = s
|
||||
if m.Dialector.DontSupportNullAsDefaultValue {
|
||||
// rewrite mariadb default value like other version
|
||||
if column.DefaultValueValue.Valid && column.DefaultValueValue.String == "NULL" {
|
||||
column.DefaultValueValue.Valid = false
|
||||
column.DefaultValueValue.String = ""
|
||||
}
|
||||
}
|
||||
|
||||
if datetimePrecision.Valid {
|
||||
column.DecimalSizeValue = datetimePrecision
|
||||
}
|
||||
|
||||
for _, c := range rawColumnTypes {
|
||||
if c.Name() == column.NameValue.String {
|
||||
column.SQLColumnType = c
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
columnTypes = append(columnTypes, column)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return columnTypes, err
|
||||
}
|
||||
|
||||
func (m Migrator) CurrentDatabase() (name string) {
|
||||
baseName := m.Migrator.CurrentDatabase()
|
||||
m.DB.Raw(
|
||||
"SELECT SCHEMA_NAME from Information_schema.SCHEMATA where SCHEMA_NAME LIKE ? ORDER BY SCHEMA_NAME=? DESC,SCHEMA_NAME limit 1",
|
||||
baseName+"%", baseName).Scan(&name)
|
||||
return
|
||||
}
|
||||
|
||||
func (m Migrator) GetTables() (tableList []string, err error) {
|
||||
err = m.DB.Raw("SELECT TABLE_NAME FROM information_schema.tables where TABLE_SCHEMA=?", m.CurrentDatabase()).
|
||||
Scan(&tableList).Error
|
||||
return
|
||||
}
|
||||
|
||||
func (m Migrator) GetIndexes(value interface{}) ([]gorm.Index, error) {
|
||||
indexes := make([]gorm.Index, 0)
|
||||
err := m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
|
||||
result := make([]*Index, 0)
|
||||
schema, table := m.CurrentSchema(stmt, stmt.Table)
|
||||
scanErr := m.DB.Table(table).Raw(indexSql, schema, table).Scan(&result).Error
|
||||
if scanErr != nil {
|
||||
return scanErr
|
||||
}
|
||||
indexMap, indexNames := groupByIndexName(result)
|
||||
|
||||
for _, name := range indexNames {
|
||||
idx := indexMap[name]
|
||||
if len(idx) == 0 {
|
||||
continue
|
||||
}
|
||||
tempIdx := &migrator.Index{
|
||||
TableName: idx[0].TableName,
|
||||
NameValue: idx[0].IndexName,
|
||||
PrimaryKeyValue: sql.NullBool{
|
||||
Bool: idx[0].IndexName == "PRIMARY",
|
||||
Valid: true,
|
||||
},
|
||||
UniqueValue: sql.NullBool{
|
||||
Bool: idx[0].NonUnique == 0,
|
||||
Valid: true,
|
||||
},
|
||||
}
|
||||
for _, x := range idx {
|
||||
tempIdx.ColumnList = append(tempIdx.ColumnList, x.ColumnName)
|
||||
}
|
||||
indexes = append(indexes, tempIdx)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return indexes, err
|
||||
}
|
||||
|
||||
// Index table index info
|
||||
type Index struct {
|
||||
TableName string `gorm:"column:TABLE_NAME"`
|
||||
ColumnName string `gorm:"column:COLUMN_NAME"`
|
||||
IndexName string `gorm:"column:INDEX_NAME"`
|
||||
NonUnique int32 `gorm:"column:NON_UNIQUE"`
|
||||
}
|
||||
|
||||
func groupByIndexName(indexList []*Index) (map[string][]*Index, []string) {
|
||||
columnIndexMap := make(map[string][]*Index, len(indexList))
|
||||
indexNames := make([]string, 0, len(indexList))
|
||||
for _, idx := range indexList {
|
||||
if _, ok := columnIndexMap[idx.IndexName]; !ok {
|
||||
indexNames = append(indexNames, idx.IndexName)
|
||||
}
|
||||
columnIndexMap[idx.IndexName] = append(columnIndexMap[idx.IndexName], idx)
|
||||
}
|
||||
return columnIndexMap, indexNames
|
||||
}
|
||||
|
||||
func (m Migrator) CurrentSchema(stmt *gorm.Statement, table string) (string, string) {
|
||||
if tables := strings.Split(table, `.`); len(tables) == 2 {
|
||||
return tables[0], tables[1]
|
||||
}
|
||||
m.DB = m.DB.Table(table)
|
||||
return m.CurrentDatabase(), table
|
||||
}
|
||||
|
||||
func (m Migrator) GetTypeAliases(databaseTypeName string) []string {
|
||||
return typeAliasMap[databaseTypeName]
|
||||
}
|
||||
|
||||
// TableType table type return tableType,error
|
||||
func (m Migrator) TableType(value interface{}) (tableType gorm.TableType, err error) {
|
||||
var table migrator.TableType
|
||||
|
||||
err = m.RunWithValue(value, func(stmt *gorm.Statement) error {
|
||||
var (
|
||||
values = []interface{}{
|
||||
&table.SchemaValue, &table.NameValue, &table.TypeValue, &table.CommentValue,
|
||||
}
|
||||
currentDatabase, tableName = m.CurrentSchema(stmt, stmt.Table)
|
||||
tableTypeSQL = "SELECT table_schema, table_name, table_type, table_comment FROM information_schema.tables WHERE table_schema = ? AND table_name = ?"
|
||||
)
|
||||
|
||||
row := m.DB.Table(tableName).Raw(tableTypeSQL, currentDatabase, tableName).Row()
|
||||
|
||||
if scanErr := row.Scan(values...); scanErr != nil {
|
||||
return scanErr
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return table, err
|
||||
}
|
||||
542
vendor/gorm.io/driver/mysql/mysql.go
generated
vendored
Normal file
542
vendor/gorm.io/driver/mysql/mysql.go
generated
vendored
Normal file
@@ -0,0 +1,542 @@
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-sql-driver/mysql"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/callbacks"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/logger"
|
||||
"gorm.io/gorm/migrator"
|
||||
"gorm.io/gorm/schema"
|
||||
"gorm.io/gorm/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultDriverName = "mysql"
|
||||
|
||||
AutoRandomTag = "auto_random()" // Treated as an auto_random field for tidb
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
DriverName string
|
||||
ServerVersion string
|
||||
DSN string
|
||||
DSNConfig *mysql.Config
|
||||
Conn gorm.ConnPool
|
||||
SkipInitializeWithVersion bool
|
||||
DefaultStringSize uint
|
||||
DefaultDatetimePrecision *int
|
||||
DisableWithReturning bool
|
||||
DisableDatetimePrecision bool
|
||||
DontSupportRenameIndex bool
|
||||
DontSupportRenameColumn bool
|
||||
DontSupportForShareClause bool
|
||||
DontSupportNullAsDefaultValue bool
|
||||
DontSupportRenameColumnUnique bool
|
||||
// As of MySQL 8.0.19, ALTER TABLE permits more general (and SQL standard) syntax
|
||||
// for dropping and altering existing constraints of any type.
|
||||
// see https://dev.mysql.com/doc/refman/8.0/en/alter-table.html
|
||||
DontSupportDropConstraint bool
|
||||
}
|
||||
|
||||
type Dialector struct {
|
||||
*Config
|
||||
}
|
||||
|
||||
var (
|
||||
// CreateClauses create clauses
|
||||
CreateClauses = []string{"INSERT", "VALUES", "ON CONFLICT"}
|
||||
// QueryClauses query clauses
|
||||
QueryClauses = []string{}
|
||||
// UpdateClauses update clauses
|
||||
UpdateClauses = []string{"UPDATE", "SET", "WHERE", "ORDER BY", "LIMIT"}
|
||||
// DeleteClauses delete clauses
|
||||
DeleteClauses = []string{"DELETE", "FROM", "WHERE", "ORDER BY", "LIMIT"}
|
||||
|
||||
defaultDatetimePrecision = 3
|
||||
)
|
||||
|
||||
func Open(dsn string) gorm.Dialector {
|
||||
dsnConf, _ := mysql.ParseDSN(dsn)
|
||||
return &Dialector{Config: &Config{DSN: dsn, DSNConfig: dsnConf}}
|
||||
}
|
||||
|
||||
func New(config Config) gorm.Dialector {
|
||||
switch {
|
||||
case config.DSN == "" && config.DSNConfig != nil:
|
||||
config.DSN = config.DSNConfig.FormatDSN()
|
||||
case config.DSN != "" && config.DSNConfig == nil:
|
||||
config.DSNConfig, _ = mysql.ParseDSN(config.DSN)
|
||||
}
|
||||
return &Dialector{Config: &config}
|
||||
}
|
||||
|
||||
func (dialector Dialector) Name() string {
|
||||
return DefaultDriverName
|
||||
}
|
||||
|
||||
// NowFunc return now func
|
||||
func (dialector Dialector) NowFunc(n int) func() time.Time {
|
||||
return func() time.Time {
|
||||
round := time.Second / time.Duration(math.Pow10(n))
|
||||
return time.Now().Round(round)
|
||||
}
|
||||
}
|
||||
|
||||
func (dialector Dialector) Apply(config *gorm.Config) error {
|
||||
if config.NowFunc != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if dialector.DefaultDatetimePrecision == nil {
|
||||
dialector.DefaultDatetimePrecision = &defaultDatetimePrecision
|
||||
}
|
||||
// while maintaining the readability of the code, separate the business logic from
|
||||
// the general part and leave it to the function to do it here.
|
||||
config.NowFunc = dialector.NowFunc(*dialector.DefaultDatetimePrecision)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dialector Dialector) Initialize(db *gorm.DB) (err error) {
|
||||
if dialector.DriverName == "" {
|
||||
dialector.DriverName = DefaultDriverName
|
||||
}
|
||||
|
||||
if dialector.DefaultDatetimePrecision == nil {
|
||||
dialector.DefaultDatetimePrecision = &defaultDatetimePrecision
|
||||
}
|
||||
|
||||
if dialector.Conn != nil {
|
||||
db.ConnPool = dialector.Conn
|
||||
} else {
|
||||
db.ConnPool, err = sql.Open(dialector.DriverName, dialector.DSN)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
withReturning := false
|
||||
if !dialector.Config.SkipInitializeWithVersion {
|
||||
err = db.ConnPool.QueryRowContext(context.Background(), "SELECT VERSION()").Scan(&dialector.ServerVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if strings.Contains(dialector.ServerVersion, "MariaDB") {
|
||||
dialector.Config.DontSupportRenameIndex = true
|
||||
dialector.Config.DontSupportRenameColumn = true
|
||||
dialector.Config.DontSupportForShareClause = true
|
||||
dialector.Config.DontSupportNullAsDefaultValue = true
|
||||
withReturning = checkVersion(dialector.ServerVersion, "10.5")
|
||||
} else if strings.HasPrefix(dialector.ServerVersion, "5.6.") {
|
||||
dialector.Config.DontSupportRenameIndex = true
|
||||
dialector.Config.DontSupportRenameColumn = true
|
||||
dialector.Config.DontSupportForShareClause = true
|
||||
dialector.Config.DontSupportDropConstraint = true
|
||||
} else if strings.HasPrefix(dialector.ServerVersion, "5.7.") {
|
||||
dialector.Config.DontSupportRenameColumn = true
|
||||
dialector.Config.DontSupportForShareClause = true
|
||||
dialector.Config.DontSupportDropConstraint = true
|
||||
} else if strings.HasPrefix(dialector.ServerVersion, "5.") {
|
||||
dialector.Config.DisableDatetimePrecision = true
|
||||
dialector.Config.DontSupportRenameIndex = true
|
||||
dialector.Config.DontSupportRenameColumn = true
|
||||
dialector.Config.DontSupportForShareClause = true
|
||||
dialector.Config.DontSupportDropConstraint = true
|
||||
}
|
||||
|
||||
if strings.Contains(dialector.ServerVersion, "TiDB") {
|
||||
dialector.Config.DontSupportRenameColumnUnique = true
|
||||
}
|
||||
}
|
||||
|
||||
// register callbacks
|
||||
callbackConfig := &callbacks.Config{
|
||||
CreateClauses: CreateClauses,
|
||||
QueryClauses: QueryClauses,
|
||||
UpdateClauses: UpdateClauses,
|
||||
DeleteClauses: DeleteClauses,
|
||||
}
|
||||
|
||||
if !dialector.Config.DisableWithReturning && withReturning {
|
||||
if !utils.Contains(callbackConfig.CreateClauses, "RETURNING") {
|
||||
callbackConfig.CreateClauses = append(callbackConfig.CreateClauses, "RETURNING")
|
||||
}
|
||||
|
||||
if !utils.Contains(callbackConfig.UpdateClauses, "RETURNING") {
|
||||
callbackConfig.UpdateClauses = append(callbackConfig.UpdateClauses, "RETURNING")
|
||||
}
|
||||
|
||||
if !utils.Contains(callbackConfig.DeleteClauses, "RETURNING") {
|
||||
callbackConfig.DeleteClauses = append(callbackConfig.DeleteClauses, "RETURNING")
|
||||
}
|
||||
}
|
||||
|
||||
callbacks.RegisterDefaultCallbacks(db, callbackConfig)
|
||||
|
||||
for k, v := range dialector.ClauseBuilders() {
|
||||
if _, ok := db.ClauseBuilders[k]; !ok {
|
||||
db.ClauseBuilders[k] = v
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const (
|
||||
// ClauseOnConflict for clause.ClauseBuilder ON CONFLICT key
|
||||
ClauseOnConflict = "ON CONFLICT"
|
||||
// ClauseValues for clause.ClauseBuilder VALUES key
|
||||
ClauseValues = "VALUES"
|
||||
// ClauseFor for clause.ClauseBuilder FOR key
|
||||
ClauseFor = "FOR"
|
||||
)
|
||||
|
||||
func (dialector Dialector) ClauseBuilders() map[string]clause.ClauseBuilder {
|
||||
clauseBuilders := map[string]clause.ClauseBuilder{
|
||||
ClauseOnConflict: func(c clause.Clause, builder clause.Builder) {
|
||||
onConflict, ok := c.Expression.(clause.OnConflict)
|
||||
if !ok {
|
||||
c.Build(builder)
|
||||
return
|
||||
}
|
||||
|
||||
builder.WriteString("ON DUPLICATE KEY UPDATE ")
|
||||
if len(onConflict.DoUpdates) == 0 {
|
||||
if s := builder.(*gorm.Statement).Schema; s != nil {
|
||||
var column clause.Column
|
||||
onConflict.DoNothing = false
|
||||
|
||||
if s.PrioritizedPrimaryField != nil {
|
||||
column = clause.Column{Name: s.PrioritizedPrimaryField.DBName}
|
||||
} else if len(s.DBNames) > 0 {
|
||||
column = clause.Column{Name: s.DBNames[0]}
|
||||
}
|
||||
|
||||
if column.Name != "" {
|
||||
onConflict.DoUpdates = []clause.Assignment{{Column: column, Value: column}}
|
||||
}
|
||||
|
||||
builder.(*gorm.Statement).AddClause(onConflict)
|
||||
}
|
||||
}
|
||||
|
||||
for idx, assignment := range onConflict.DoUpdates {
|
||||
if idx > 0 {
|
||||
builder.WriteByte(',')
|
||||
}
|
||||
|
||||
builder.WriteQuoted(assignment.Column)
|
||||
builder.WriteByte('=')
|
||||
if column, ok := assignment.Value.(clause.Column); ok && column.Table == "excluded" {
|
||||
column.Table = ""
|
||||
builder.WriteString("VALUES(")
|
||||
builder.WriteQuoted(column)
|
||||
builder.WriteByte(')')
|
||||
} else {
|
||||
builder.AddVar(builder, assignment.Value)
|
||||
}
|
||||
}
|
||||
},
|
||||
ClauseValues: func(c clause.Clause, builder clause.Builder) {
|
||||
if values, ok := c.Expression.(clause.Values); ok && len(values.Columns) == 0 {
|
||||
builder.WriteString("VALUES()")
|
||||
return
|
||||
}
|
||||
c.Build(builder)
|
||||
},
|
||||
}
|
||||
|
||||
if dialector.Config.DontSupportForShareClause {
|
||||
clauseBuilders[ClauseFor] = func(c clause.Clause, builder clause.Builder) {
|
||||
if values, ok := c.Expression.(clause.Locking); ok && strings.EqualFold(values.Strength, "SHARE") {
|
||||
builder.WriteString("LOCK IN SHARE MODE")
|
||||
return
|
||||
}
|
||||
c.Build(builder)
|
||||
}
|
||||
}
|
||||
|
||||
return clauseBuilders
|
||||
}
|
||||
|
||||
func (dialector Dialector) DefaultValueOf(field *schema.Field) clause.Expression {
|
||||
return clause.Expr{SQL: "DEFAULT"}
|
||||
}
|
||||
|
||||
func (dialector Dialector) Migrator(db *gorm.DB) gorm.Migrator {
|
||||
return Migrator{
|
||||
Migrator: migrator.Migrator{
|
||||
Config: migrator.Config{
|
||||
DB: db,
|
||||
Dialector: dialector,
|
||||
},
|
||||
},
|
||||
Dialector: dialector,
|
||||
}
|
||||
}
|
||||
|
||||
func (dialector Dialector) BindVarTo(writer clause.Writer, stmt *gorm.Statement, v interface{}) {
|
||||
writer.WriteByte('?')
|
||||
}
|
||||
|
||||
func (dialector Dialector) QuoteTo(writer clause.Writer, str string) {
|
||||
var (
|
||||
underQuoted, selfQuoted bool
|
||||
continuousBacktick int8
|
||||
shiftDelimiter int8
|
||||
)
|
||||
|
||||
for _, v := range []byte(str) {
|
||||
switch v {
|
||||
case '`':
|
||||
continuousBacktick++
|
||||
if continuousBacktick == 2 {
|
||||
writer.WriteString("``")
|
||||
continuousBacktick = 0
|
||||
}
|
||||
case '.':
|
||||
if continuousBacktick > 0 || !selfQuoted {
|
||||
shiftDelimiter = 0
|
||||
underQuoted = false
|
||||
continuousBacktick = 0
|
||||
writer.WriteByte('`')
|
||||
}
|
||||
writer.WriteByte(v)
|
||||
continue
|
||||
default:
|
||||
if shiftDelimiter-continuousBacktick <= 0 && !underQuoted {
|
||||
writer.WriteByte('`')
|
||||
underQuoted = true
|
||||
if selfQuoted = continuousBacktick > 0; selfQuoted {
|
||||
continuousBacktick -= 1
|
||||
}
|
||||
}
|
||||
|
||||
for ; continuousBacktick > 0; continuousBacktick -= 1 {
|
||||
writer.WriteString("``")
|
||||
}
|
||||
|
||||
writer.WriteByte(v)
|
||||
}
|
||||
shiftDelimiter++
|
||||
}
|
||||
|
||||
if continuousBacktick > 0 && !selfQuoted {
|
||||
writer.WriteString("``")
|
||||
}
|
||||
writer.WriteByte('`')
|
||||
}
|
||||
|
||||
type localTimeInterface interface {
|
||||
In(loc *time.Location) time.Time
|
||||
}
|
||||
|
||||
func (dialector Dialector) Explain(sql string, vars ...interface{}) string {
|
||||
if dialector.DSNConfig != nil && dialector.DSNConfig.Loc != nil {
|
||||
for i, v := range vars {
|
||||
if p, ok := v.(localTimeInterface); ok {
|
||||
func(i int, t localTimeInterface) {
|
||||
defer func() {
|
||||
recover()
|
||||
}()
|
||||
vars[i] = t.In(dialector.DSNConfig.Loc)
|
||||
}(i, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
return logger.ExplainSQL(sql, nil, `'`, vars...)
|
||||
}
|
||||
|
||||
func (dialector Dialector) DataTypeOf(field *schema.Field) string {
|
||||
switch field.DataType {
|
||||
case schema.Bool:
|
||||
return "boolean"
|
||||
case schema.Int, schema.Uint:
|
||||
return dialector.getSchemaIntAndUnitType(field)
|
||||
case schema.Float:
|
||||
return dialector.getSchemaFloatType(field)
|
||||
case schema.String:
|
||||
return dialector.getSchemaStringType(field)
|
||||
case schema.Time:
|
||||
return dialector.getSchemaTimeType(field)
|
||||
case schema.Bytes:
|
||||
return dialector.getSchemaBytesType(field)
|
||||
default:
|
||||
return dialector.getSchemaCustomType(field)
|
||||
}
|
||||
}
|
||||
|
||||
func (dialector Dialector) getSchemaFloatType(field *schema.Field) string {
|
||||
if field.Precision > 0 {
|
||||
return fmt.Sprintf("decimal(%d, %d)", field.Precision, field.Scale)
|
||||
}
|
||||
|
||||
if field.Size <= 32 {
|
||||
return "float"
|
||||
}
|
||||
|
||||
return "double"
|
||||
}
|
||||
|
||||
func (dialector Dialector) getSchemaStringType(field *schema.Field) string {
|
||||
size := field.Size
|
||||
if size == 0 {
|
||||
if dialector.DefaultStringSize > 0 {
|
||||
size = int(dialector.DefaultStringSize)
|
||||
} else {
|
||||
hasIndex := field.TagSettings["INDEX"] != "" || field.TagSettings["UNIQUE"] != ""
|
||||
// TEXT, GEOMETRY or JSON column can't have a default value
|
||||
if field.PrimaryKey || field.HasDefaultValue || hasIndex {
|
||||
size = 191 // utf8mb4
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if size >= 65536 && size <= int(math.Pow(2, 24)) {
|
||||
return "mediumtext"
|
||||
}
|
||||
|
||||
if size > int(math.Pow(2, 24)) || size <= 0 {
|
||||
return "longtext"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("varchar(%d)", size)
|
||||
}
|
||||
|
||||
func (dialector Dialector) getSchemaTimeType(field *schema.Field) string {
|
||||
if !dialector.DisableDatetimePrecision && field.Precision == 0 && field.TagSettings["PRECISION"] == "" {
|
||||
field.Precision = *dialector.DefaultDatetimePrecision
|
||||
}
|
||||
|
||||
var precision string
|
||||
if field.Precision > 0 {
|
||||
precision = fmt.Sprintf("(%d)", field.Precision)
|
||||
}
|
||||
|
||||
if field.NotNull || field.PrimaryKey {
|
||||
return "datetime" + precision
|
||||
}
|
||||
return "datetime" + precision + " NULL"
|
||||
}
|
||||
|
||||
func (dialector Dialector) getSchemaBytesType(field *schema.Field) string {
|
||||
if field.Size > 0 && field.Size < 65536 {
|
||||
return fmt.Sprintf("varbinary(%d)", field.Size)
|
||||
}
|
||||
|
||||
if field.Size >= 65536 && field.Size <= int(math.Pow(2, 24)) {
|
||||
return "mediumblob"
|
||||
}
|
||||
|
||||
return "longblob"
|
||||
}
|
||||
|
||||
// autoRandomType
|
||||
// field.DataType MUST be `schema.Int` or `schema.Uint`
|
||||
// Judgement logic:
|
||||
// 1. Is PrimaryKey;
|
||||
// 2. Has default value;
|
||||
// 3. Default value is "auto_random()";
|
||||
// 4. IGNORE the field.Size, it MUST be bigint;
|
||||
// 5. CLEAR the default tag, and return true;
|
||||
// 6. Otherwise, return false.
|
||||
func autoRandomType(field *schema.Field) (bool, string) {
|
||||
if field.PrimaryKey && field.HasDefaultValue &&
|
||||
strings.ToLower(strings.TrimSpace(field.DefaultValue)) == AutoRandomTag {
|
||||
field.DefaultValue = ""
|
||||
|
||||
sqlType := "bigint"
|
||||
if field.DataType == schema.Uint {
|
||||
sqlType += " unsigned"
|
||||
}
|
||||
sqlType += " auto_random"
|
||||
return true, sqlType
|
||||
}
|
||||
|
||||
return false, ""
|
||||
}
|
||||
|
||||
func (dialector Dialector) getSchemaIntAndUnitType(field *schema.Field) string {
|
||||
if autoRandom, typeString := autoRandomType(field); autoRandom {
|
||||
return typeString
|
||||
}
|
||||
|
||||
constraint := func(sqlType string) string {
|
||||
if field.DataType == schema.Uint {
|
||||
sqlType += " unsigned"
|
||||
}
|
||||
if field.AutoIncrement {
|
||||
sqlType += " AUTO_INCREMENT"
|
||||
}
|
||||
return sqlType
|
||||
}
|
||||
|
||||
switch {
|
||||
case field.Size <= 8:
|
||||
return constraint("tinyint")
|
||||
case field.Size <= 16:
|
||||
return constraint("smallint")
|
||||
case field.Size <= 24:
|
||||
return constraint("mediumint")
|
||||
case field.Size <= 32:
|
||||
return constraint("int")
|
||||
default:
|
||||
return constraint("bigint")
|
||||
}
|
||||
}
|
||||
|
||||
func (dialector Dialector) getSchemaCustomType(field *schema.Field) string {
|
||||
sqlType := string(field.DataType)
|
||||
|
||||
if field.AutoIncrement && !strings.Contains(strings.ToLower(sqlType), " auto_increment") {
|
||||
sqlType += " AUTO_INCREMENT"
|
||||
}
|
||||
|
||||
return sqlType
|
||||
}
|
||||
|
||||
func (dialector Dialector) SavePoint(tx *gorm.DB, name string) error {
|
||||
return tx.Exec("SAVEPOINT " + name).Error
|
||||
}
|
||||
|
||||
func (dialector Dialector) RollbackTo(tx *gorm.DB, name string) error {
|
||||
return tx.Exec("ROLLBACK TO SAVEPOINT " + name).Error
|
||||
}
|
||||
|
||||
// checkVersion newer or equal returns true, old returns false
|
||||
func checkVersion(newVersion, oldVersion string) bool {
|
||||
if newVersion == oldVersion {
|
||||
return true
|
||||
}
|
||||
|
||||
var (
|
||||
versionTrimmerRegexp = regexp.MustCompile(`^(\d+).*$`)
|
||||
|
||||
newVersions = strings.Split(newVersion, ".")
|
||||
oldVersions = strings.Split(oldVersion, ".")
|
||||
)
|
||||
for idx, nv := range newVersions {
|
||||
if len(oldVersions) <= idx {
|
||||
return true
|
||||
}
|
||||
|
||||
nvi, _ := strconv.Atoi(versionTrimmerRegexp.ReplaceAllString(nv, "$1"))
|
||||
ovi, _ := strconv.Atoi(versionTrimmerRegexp.ReplaceAllString(oldVersions[idx], "$1"))
|
||||
if nvi == ovi {
|
||||
continue
|
||||
}
|
||||
return nvi > ovi
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
1
vendor/gorm.io/driver/sqlite/.gitignore
generated
vendored
Normal file
1
vendor/gorm.io/driver/sqlite/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
.idea/
|
||||
21
vendor/gorm.io/driver/sqlite/License
generated
vendored
Normal file
21
vendor/gorm.io/driver/sqlite/License
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013-NOW Jinzhu <wosmvp@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
30
vendor/gorm.io/driver/sqlite/README.md
generated
vendored
Normal file
30
vendor/gorm.io/driver/sqlite/README.md
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
# GORM Sqlite Driver
|
||||
|
||||

|
||||
|
||||
## USAGE
|
||||
|
||||
```go
|
||||
import (
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// github.com/mattn/go-sqlite3
|
||||
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{})
|
||||
```
|
||||
|
||||
Checkout [https://gorm.io](https://gorm.io) for details.
|
||||
|
||||
### Pure go Sqlite Driver
|
||||
|
||||
checkout [https://github.com/glebarez/sqlite](https://github.com/glebarez/sqlite) for details
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/glebarez/sqlite"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{})
|
||||
```
|
||||
285
vendor/gorm.io/driver/sqlite/ddlmod.go
generated
vendored
Normal file
285
vendor/gorm.io/driver/sqlite/ddlmod.go
generated
vendored
Normal file
@@ -0,0 +1,285 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"gorm.io/gorm/migrator"
|
||||
)
|
||||
|
||||
var (
|
||||
sqliteSeparator = "`|\"|'|\t"
|
||||
uniqueRegexp = regexp.MustCompile(fmt.Sprintf(`^CONSTRAINT [%v]?[\w-]+[%v]? UNIQUE (.*)$`, sqliteSeparator, sqliteSeparator))
|
||||
indexRegexp = regexp.MustCompile(fmt.Sprintf(`(?is)CREATE(?: UNIQUE)? INDEX [%v]?[\w\d-]+[%v]?(?s:.*?)ON (.*)$`, sqliteSeparator, sqliteSeparator))
|
||||
tableRegexp = regexp.MustCompile(fmt.Sprintf(`(?is)(CREATE TABLE [%v]?[\w\d-]+[%v]?)(?:\s*\((.*)\))?`, sqliteSeparator, sqliteSeparator))
|
||||
separatorRegexp = regexp.MustCompile(fmt.Sprintf("[%v]", sqliteSeparator))
|
||||
columnRegexp = regexp.MustCompile(fmt.Sprintf(`^[%v]?([\w\d]+)[%v]?\s+([\w\(\)\d]+)(.*)$`, sqliteSeparator, sqliteSeparator))
|
||||
defaultValueRegexp = regexp.MustCompile(`(?i) DEFAULT \(?(.+)?\)?( |COLLATE|GENERATED|$)`)
|
||||
regRealDataType = regexp.MustCompile(`[^\d](\d+)[^\d]?`)
|
||||
)
|
||||
|
||||
type ddl struct {
|
||||
head string
|
||||
fields []string
|
||||
columns []migrator.ColumnType
|
||||
}
|
||||
|
||||
func parseDDL(strs ...string) (*ddl, error) {
|
||||
var result ddl
|
||||
for _, str := range strs {
|
||||
if sections := tableRegexp.FindStringSubmatch(str); len(sections) > 0 {
|
||||
var (
|
||||
ddlBody = sections[2]
|
||||
ddlBodyRunes = []rune(ddlBody)
|
||||
bracketLevel int
|
||||
quote rune
|
||||
buf string
|
||||
)
|
||||
ddlBodyRunesLen := len(ddlBodyRunes)
|
||||
|
||||
result.head = sections[1]
|
||||
|
||||
for idx := 0; idx < ddlBodyRunesLen; idx++ {
|
||||
var (
|
||||
next rune = 0
|
||||
c = ddlBodyRunes[idx]
|
||||
)
|
||||
if idx+1 < ddlBodyRunesLen {
|
||||
next = ddlBodyRunes[idx+1]
|
||||
}
|
||||
|
||||
if sc := string(c); separatorRegexp.MatchString(sc) {
|
||||
if c == next {
|
||||
buf += sc // Skip escaped quote
|
||||
idx++
|
||||
} else if quote > 0 {
|
||||
quote = 0
|
||||
} else {
|
||||
quote = c
|
||||
}
|
||||
} else if quote == 0 {
|
||||
if c == '(' {
|
||||
bracketLevel++
|
||||
} else if c == ')' {
|
||||
bracketLevel--
|
||||
} else if bracketLevel == 0 {
|
||||
if c == ',' {
|
||||
result.fields = append(result.fields, strings.TrimSpace(buf))
|
||||
buf = ""
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if bracketLevel < 0 {
|
||||
return nil, errors.New("invalid DDL, unbalanced brackets")
|
||||
}
|
||||
|
||||
buf += string(c)
|
||||
}
|
||||
|
||||
if bracketLevel != 0 {
|
||||
return nil, errors.New("invalid DDL, unbalanced brackets")
|
||||
}
|
||||
|
||||
if buf != "" {
|
||||
result.fields = append(result.fields, strings.TrimSpace(buf))
|
||||
}
|
||||
|
||||
for _, f := range result.fields {
|
||||
fUpper := strings.ToUpper(f)
|
||||
if strings.HasPrefix(fUpper, "CHECK") {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(fUpper, "CONSTRAINT") {
|
||||
matches := uniqueRegexp.FindStringSubmatch(f)
|
||||
if len(matches) > 0 {
|
||||
cols, err := parseAllColumns(matches[1])
|
||||
if err == nil && len(cols) == 1 {
|
||||
for idx, column := range result.columns {
|
||||
if column.NameValue.String == cols[0] {
|
||||
column.UniqueValue = sql.NullBool{Bool: true, Valid: true}
|
||||
result.columns[idx] = column
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(fUpper, "PRIMARY KEY") {
|
||||
cols, err := parseAllColumns(f)
|
||||
if err == nil {
|
||||
for _, name := range cols {
|
||||
for idx, column := range result.columns {
|
||||
if column.NameValue.String == name {
|
||||
column.PrimaryKeyValue = sql.NullBool{Bool: true, Valid: true}
|
||||
result.columns[idx] = column
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if matches := columnRegexp.FindStringSubmatch(f); len(matches) > 0 {
|
||||
columnType := migrator.ColumnType{
|
||||
NameValue: sql.NullString{String: matches[1], Valid: true},
|
||||
DataTypeValue: sql.NullString{String: matches[2], Valid: true},
|
||||
ColumnTypeValue: sql.NullString{String: matches[2], Valid: true},
|
||||
PrimaryKeyValue: sql.NullBool{Valid: true},
|
||||
UniqueValue: sql.NullBool{Valid: true},
|
||||
NullableValue: sql.NullBool{Bool: true, Valid: true},
|
||||
DefaultValueValue: sql.NullString{Valid: false},
|
||||
}
|
||||
|
||||
matchUpper := strings.ToUpper(matches[3])
|
||||
if strings.Contains(matchUpper, " NOT NULL") {
|
||||
columnType.NullableValue = sql.NullBool{Bool: false, Valid: true}
|
||||
} else if strings.Contains(matchUpper, " NULL") {
|
||||
columnType.NullableValue = sql.NullBool{Bool: true, Valid: true}
|
||||
}
|
||||
if strings.Contains(matchUpper, " UNIQUE") {
|
||||
columnType.UniqueValue = sql.NullBool{Bool: true, Valid: true}
|
||||
}
|
||||
if strings.Contains(matchUpper, " PRIMARY") {
|
||||
columnType.PrimaryKeyValue = sql.NullBool{Bool: true, Valid: true}
|
||||
}
|
||||
if defaultMatches := defaultValueRegexp.FindStringSubmatch(matches[3]); len(defaultMatches) > 1 {
|
||||
if strings.ToLower(defaultMatches[1]) != "null" {
|
||||
columnType.DefaultValueValue = sql.NullString{String: strings.Trim(defaultMatches[1], `"`), Valid: true}
|
||||
}
|
||||
}
|
||||
|
||||
// data type length
|
||||
matches := regRealDataType.FindAllStringSubmatch(columnType.DataTypeValue.String, -1)
|
||||
if len(matches) == 1 && len(matches[0]) == 2 {
|
||||
size, _ := strconv.Atoi(matches[0][1])
|
||||
columnType.LengthValue = sql.NullInt64{Valid: true, Int64: int64(size)}
|
||||
columnType.DataTypeValue.String = strings.TrimSuffix(columnType.DataTypeValue.String, matches[0][0])
|
||||
}
|
||||
|
||||
result.columns = append(result.columns, columnType)
|
||||
}
|
||||
}
|
||||
} else if matches := indexRegexp.FindStringSubmatch(str); len(matches) > 0 {
|
||||
// don't report Unique by UniqueIndex
|
||||
} else {
|
||||
return nil, errors.New("invalid DDL")
|
||||
}
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (d *ddl) clone() *ddl {
|
||||
copied := new(ddl)
|
||||
*copied = *d
|
||||
|
||||
copied.fields = make([]string, len(d.fields))
|
||||
copy(copied.fields, d.fields)
|
||||
copied.columns = make([]migrator.ColumnType, len(d.columns))
|
||||
copy(copied.columns, d.columns)
|
||||
|
||||
return copied
|
||||
}
|
||||
|
||||
func (d *ddl) compile() string {
|
||||
if len(d.fields) == 0 {
|
||||
return d.head
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s (%s)", d.head, strings.Join(d.fields, ","))
|
||||
}
|
||||
|
||||
func (d *ddl) renameTable(dst, src string) error {
|
||||
tableReg, err := regexp.Compile("\\s*('|`|\")?\\b" + regexp.QuoteMeta(src) + "\\b('|`|\")?\\s*")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
replaced := tableReg.ReplaceAllString(d.head, fmt.Sprintf(" `%s` ", dst))
|
||||
if replaced == d.head {
|
||||
return fmt.Errorf("failed to look up tablename `%s` from DDL head '%s'", src, d.head)
|
||||
}
|
||||
|
||||
d.head = replaced
|
||||
return nil
|
||||
}
|
||||
|
||||
func compileConstraintRegexp(name string) *regexp.Regexp {
|
||||
return regexp.MustCompile("^(?i:CONSTRAINT)\\s+[\"`]?" + regexp.QuoteMeta(name) + "[\"`\\s]")
|
||||
}
|
||||
|
||||
func (d *ddl) addConstraint(name string, sql string) {
|
||||
reg := compileConstraintRegexp(name)
|
||||
|
||||
for i := 0; i < len(d.fields); i++ {
|
||||
if reg.MatchString(d.fields[i]) {
|
||||
d.fields[i] = sql
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
d.fields = append(d.fields, sql)
|
||||
}
|
||||
|
||||
func (d *ddl) removeConstraint(name string) bool {
|
||||
reg := compileConstraintRegexp(name)
|
||||
|
||||
for i := 0; i < len(d.fields); i++ {
|
||||
if reg.MatchString(d.fields[i]) {
|
||||
d.fields = append(d.fields[:i], d.fields[i+1:]...)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *ddl) hasConstraint(name string) bool {
|
||||
reg := compileConstraintRegexp(name)
|
||||
|
||||
for _, f := range d.fields {
|
||||
if reg.MatchString(f) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *ddl) getColumns() []string {
|
||||
res := []string{}
|
||||
|
||||
for _, f := range d.fields {
|
||||
fUpper := strings.ToUpper(f)
|
||||
if strings.HasPrefix(fUpper, "PRIMARY KEY") ||
|
||||
strings.HasPrefix(fUpper, "CHECK") ||
|
||||
strings.HasPrefix(fUpper, "CONSTRAINT") ||
|
||||
strings.Contains(fUpper, "GENERATED ALWAYS AS") {
|
||||
continue
|
||||
}
|
||||
|
||||
reg := regexp.MustCompile("^[\"`']?([\\w\\d]+)[\"`']?")
|
||||
match := reg.FindStringSubmatch(f)
|
||||
|
||||
if match != nil {
|
||||
res = append(res, "`"+match[1]+"`")
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (d *ddl) removeColumn(name string) bool {
|
||||
reg := regexp.MustCompile("^(`|'|\"| )" + regexp.QuoteMeta(name) + "(`|'|\"| ) .*?$")
|
||||
|
||||
for i := 0; i < len(d.fields); i++ {
|
||||
if reg.MatchString(d.fields[i]) {
|
||||
d.fields = append(d.fields[:i], d.fields[i+1:]...)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
117
vendor/gorm.io/driver/sqlite/ddlmod_parse_all_columns.go
generated
vendored
Normal file
117
vendor/gorm.io/driver/sqlite/ddlmod_parse_all_columns.go
generated
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
package sqlite
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type parseAllColumnsState int
|
||||
|
||||
const (
|
||||
parseAllColumnsState_NONE parseAllColumnsState = iota
|
||||
parseAllColumnsState_Beginning
|
||||
parseAllColumnsState_ReadingRawName
|
||||
parseAllColumnsState_ReadingQuotedName
|
||||
parseAllColumnsState_EndOfName
|
||||
parseAllColumnsState_State_End
|
||||
)
|
||||
|
||||
func parseAllColumns(in string) ([]string, error) {
|
||||
s := []rune(in)
|
||||
columns := make([]string, 0)
|
||||
state := parseAllColumnsState_NONE
|
||||
quote := rune(0)
|
||||
name := make([]rune, 0)
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch state {
|
||||
case parseAllColumnsState_NONE:
|
||||
if s[i] == '(' {
|
||||
state = parseAllColumnsState_Beginning
|
||||
}
|
||||
case parseAllColumnsState_Beginning:
|
||||
if isSpace(s[i]) {
|
||||
continue
|
||||
}
|
||||
if isQuote(s[i]) {
|
||||
state = parseAllColumnsState_ReadingQuotedName
|
||||
quote = s[i]
|
||||
continue
|
||||
}
|
||||
if s[i] == '[' {
|
||||
state = parseAllColumnsState_ReadingQuotedName
|
||||
quote = ']'
|
||||
continue
|
||||
} else if s[i] == ')' {
|
||||
return columns, fmt.Errorf("unexpected token: %s", string(s[i]))
|
||||
}
|
||||
state = parseAllColumnsState_ReadingRawName
|
||||
name = append(name, s[i])
|
||||
case parseAllColumnsState_ReadingRawName:
|
||||
if isSeparator(s[i]) {
|
||||
state = parseAllColumnsState_Beginning
|
||||
columns = append(columns, string(name))
|
||||
name = make([]rune, 0)
|
||||
continue
|
||||
}
|
||||
if s[i] == ')' {
|
||||
state = parseAllColumnsState_State_End
|
||||
columns = append(columns, string(name))
|
||||
}
|
||||
if isQuote(s[i]) {
|
||||
return nil, fmt.Errorf("unexpected token: %s", string(s[i]))
|
||||
}
|
||||
if isSpace(s[i]) {
|
||||
state = parseAllColumnsState_EndOfName
|
||||
columns = append(columns, string(name))
|
||||
name = make([]rune, 0)
|
||||
continue
|
||||
}
|
||||
name = append(name, s[i])
|
||||
case parseAllColumnsState_ReadingQuotedName:
|
||||
if s[i] == quote {
|
||||
// check if quote character is escaped
|
||||
if i+1 < len(s) && s[i+1] == quote {
|
||||
name = append(name, quote)
|
||||
i++
|
||||
continue
|
||||
}
|
||||
state = parseAllColumnsState_EndOfName
|
||||
columns = append(columns, string(name))
|
||||
name = make([]rune, 0)
|
||||
continue
|
||||
}
|
||||
name = append(name, s[i])
|
||||
case parseAllColumnsState_EndOfName:
|
||||
if isSpace(s[i]) {
|
||||
continue
|
||||
}
|
||||
if isSeparator(s[i]) {
|
||||
state = parseAllColumnsState_Beginning
|
||||
continue
|
||||
}
|
||||
if s[i] == ')' {
|
||||
state = parseAllColumnsState_State_End
|
||||
continue
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected token: %s", string(s[i]))
|
||||
case parseAllColumnsState_State_End:
|
||||
break
|
||||
}
|
||||
}
|
||||
if state != parseAllColumnsState_State_End {
|
||||
return nil, errors.New("unexpected end")
|
||||
}
|
||||
return columns, nil
|
||||
}
|
||||
|
||||
func isSpace(r rune) bool {
|
||||
return r == ' ' || r == '\t'
|
||||
}
|
||||
|
||||
func isQuote(r rune) bool {
|
||||
return r == '`' || r == '"' || r == '\''
|
||||
}
|
||||
|
||||
func isSeparator(r rune) bool {
|
||||
return r == ','
|
||||
}
|
||||
40
vendor/gorm.io/driver/sqlite/error_translator.go
generated
vendored
Normal file
40
vendor/gorm.io/driver/sqlite/error_translator.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
package sqlite

import (
	"encoding/json"

	"gorm.io/gorm"
)

// The error codes to map sqlite errors to gorm errors, here is a reference about error codes for sqlite https://www.sqlite.org/rescode.html.
var errCodes = map[int]error{
	1555: gorm.ErrDuplicatedKey,
	2067: gorm.ErrDuplicatedKey,
	787:  gorm.ErrForeignKeyViolated,
}

type ErrMessage struct {
	Code         int `json:"Code"`
	ExtendedCode int `json:"ExtendedCode"`
	SystemErrno  int `json:"SystemErrno"`
}

// Translate it will translate the error to native gorm errors.
// We are not using go-sqlite3 error type intentionally here because it will need the CGO_ENABLED=1 and cross-C-compiler.
func (dialector Dialector) Translate(err error) error {
	parsedErr, marshalErr := json.Marshal(err)
	if marshalErr != nil {
		return err
	}

	var errMsg ErrMessage
	unmarshalErr := json.Unmarshal(parsedErr, &errMsg)
	if unmarshalErr != nil {
		return err
	}

	if translatedErr, found := errCodes[errMsg.ExtendedCode]; found {
		return translatedErr
	}
	return err
}
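Not part of the vendored file: a minimal usage sketch of this error translation, assuming the vendored GORM version supports the `TranslateError` config flag. A unique-index violation (SQLite extended code 2067) then surfaces as `gorm.ErrDuplicatedKey`:

```go
package main

import (
	"errors"
	"log"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type User struct {
	ID    uint
	Email string `gorm:"uniqueIndex"`
}

func main() {
	// TranslateError asks the dialector's Translate method (above) to map
	// driver error codes onto GORM's portable errors.
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{TranslateError: true})
	if err != nil {
		log.Fatal(err)
	}
	if err := db.AutoMigrate(&User{}); err != nil {
		log.Fatal(err)
	}

	db.Create(&User{Email: "a@example.com"})
	err = db.Create(&User{Email: "a@example.com"}).Error
	if errors.Is(err, gorm.ErrDuplicatedKey) {
		log.Println("duplicate detected via translated error")
	}
}
```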
7
vendor/gorm.io/driver/sqlite/errors.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package sqlite

import "errors"

var (
	ErrConstraintsNotImplemented = errors.New("constraints not implemented on sqlite, consider using DisableForeignKeyConstraintWhenMigrating, more details https://github.com/go-gorm/gorm/wiki/GORM-V2-Release-Note-Draft#all-new-migrator")
)
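Not part of the vendored file: `ErrConstraintsNotImplemented` points at GORM's `DisableForeignKeyConstraintWhenMigrating` option as the workaround; a minimal sketch of that configuration:

```go
package main

import (
	"log"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

func main() {
	// Skip creating/dropping foreign-key constraints during AutoMigrate,
	// which is the workaround the error message above refers to.
	db, err := gorm.Open(sqlite.Open("gorm.db"), &gorm.Config{
		DisableForeignKeyConstraintWhenMigrating: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = db
}
```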
430
vendor/gorm.io/driver/sqlite/migrator.go
generated
vendored
Normal file
@@ -0,0 +1,430 @@
package sqlite

import (
	"database/sql"
	"fmt"
	"strings"

	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/migrator"
	"gorm.io/gorm/schema"
)

type Migrator struct {
	migrator.Migrator
}

func (m *Migrator) RunWithoutForeignKey(fc func() error) error {
	var enabled int
	m.DB.Raw("PRAGMA foreign_keys").Scan(&enabled)
	if enabled == 1 {
		m.DB.Exec("PRAGMA foreign_keys = OFF")
		defer m.DB.Exec("PRAGMA foreign_keys = ON")
	}

	return fc()
}

func (m Migrator) HasTable(value interface{}) bool {
	var count int
	m.Migrator.RunWithValue(value, func(stmt *gorm.Statement) error {
		return m.DB.Raw("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", stmt.Table).Row().Scan(&count)
	})
	return count > 0
}

func (m Migrator) DropTable(values ...interface{}) error {
	return m.RunWithoutForeignKey(func() error {
		values = m.ReorderModels(values, false)
		tx := m.DB.Session(&gorm.Session{})

		for i := len(values) - 1; i >= 0; i-- {
			if err := m.RunWithValue(values[i], func(stmt *gorm.Statement) error {
				return tx.Exec("DROP TABLE IF EXISTS ?", clause.Table{Name: stmt.Table}).Error
			}); err != nil {
				return err
			}
		}

		return nil
	})
}

func (m Migrator) GetTables() (tableList []string, err error) {
	return tableList, m.DB.Raw("SELECT name FROM sqlite_master where type=?", "table").Scan(&tableList).Error
}

func (m Migrator) HasColumn(value interface{}, name string) bool {
	var count int
	m.Migrator.RunWithValue(value, func(stmt *gorm.Statement) error {
		if stmt.Schema != nil {
			if field := stmt.Schema.LookUpField(name); field != nil {
				name = field.DBName
			}
		}

		if name != "" {
			m.DB.Raw(
				"SELECT count(*) FROM sqlite_master WHERE type = ? AND tbl_name = ? AND (sql LIKE ? OR sql LIKE ? OR sql LIKE ? OR sql LIKE ? OR sql LIKE ?)",
				"table", stmt.Table, `%"`+name+`" %`, `%`+name+` %`, "%`"+name+"`%", "%["+name+"]%", "%\t"+name+"\t%",
			).Row().Scan(&count)
		}
		return nil
	})
	return count > 0
}

func (m Migrator) AlterColumn(value interface{}, name string) error {
	return m.RunWithoutForeignKey(func() error {
		return m.recreateTable(value, nil, func(ddl *ddl, stmt *gorm.Statement) (*ddl, []interface{}, error) {
			if field := stmt.Schema.LookUpField(name); field != nil {
				var sqlArgs []interface{}
				for i, f := range ddl.fields {
					if matches := columnRegexp.FindStringSubmatch(f); len(matches) > 1 && matches[1] == field.DBName {
						ddl.fields[i] = fmt.Sprintf("`%v` ?", field.DBName)
						sqlArgs = []interface{}{m.FullDataTypeOf(field)}
						// table created by old version might look like `CREATE TABLE ? (? varchar(10) UNIQUE)`.
						// FullDataTypeOf doesn't contain UNIQUE, so we need to add unique constraint.
						if strings.Contains(strings.ToUpper(matches[3]), " UNIQUE") {
							uniName := m.DB.NamingStrategy.UniqueName(stmt.Table, field.DBName)
							uni, _ := m.GuessConstraintInterfaceAndTable(stmt, uniName)
							if uni != nil {
								uniSQL, uniArgs := uni.Build()
								ddl.addConstraint(uniName, uniSQL)
								sqlArgs = append(sqlArgs, uniArgs...)
							}
						}
						break
					}
				}
				return ddl, sqlArgs, nil
			}
			return nil, nil, fmt.Errorf("failed to alter field with name %v", name)
		})
	})
}

// ColumnTypes return columnTypes []gorm.ColumnType and execErr error
func (m Migrator) ColumnTypes(value interface{}) ([]gorm.ColumnType, error) {
	columnTypes := make([]gorm.ColumnType, 0)
	execErr := m.RunWithValue(value, func(stmt *gorm.Statement) (err error) {
		var (
			sqls   []string
			sqlDDL *ddl
		)

		if err := m.DB.Raw("SELECT sql FROM sqlite_master WHERE type IN ? AND tbl_name = ? AND sql IS NOT NULL order by type = ? desc", []string{"table", "index"}, stmt.Table, "table").Scan(&sqls).Error; err != nil {
			return err
		}

		if sqlDDL, err = parseDDL(sqls...); err != nil {
			return err
		}

		rows, err := m.DB.Session(&gorm.Session{}).Table(stmt.Table).Limit(1).Rows()
		if err != nil {
			return err
		}
		defer func() {
			err = rows.Close()
		}()

		var rawColumnTypes []*sql.ColumnType
		rawColumnTypes, err = rows.ColumnTypes()
		if err != nil {
			return err
		}

		for _, c := range rawColumnTypes {
			columnType := migrator.ColumnType{SQLColumnType: c}
			for _, column := range sqlDDL.columns {
				if column.NameValue.String == c.Name() {
					column.SQLColumnType = c
					columnType = column
					break
				}
			}
			columnTypes = append(columnTypes, columnType)
		}

		return err
	})

	return columnTypes, execErr
}

func (m Migrator) DropColumn(value interface{}, name string) error {
	return m.recreateTable(value, nil, func(ddl *ddl, stmt *gorm.Statement) (*ddl, []interface{}, error) {
		if field := stmt.Schema.LookUpField(name); field != nil {
			name = field.DBName
		}

		ddl.removeColumn(name)
		return ddl, nil, nil
	})
}

func (m Migrator) CreateConstraint(value interface{}, name string) error {
	return m.RunWithValue(value, func(stmt *gorm.Statement) error {
		constraint, table := m.GuessConstraintInterfaceAndTable(stmt, name)

		return m.recreateTable(value, &table,
			func(ddl *ddl, stmt *gorm.Statement) (*ddl, []interface{}, error) {
				var (
					constraintName   string
					constraintSql    string
					constraintValues []interface{}
				)

				if constraint != nil {
					constraintName = constraint.GetName()
					constraintSql, constraintValues = constraint.Build()
				} else {
					return nil, nil, nil
				}

				ddl.addConstraint(constraintName, constraintSql)
				return ddl, constraintValues, nil
			})
	})
}

func (m Migrator) DropConstraint(value interface{}, name string) error {
	return m.RunWithValue(value, func(stmt *gorm.Statement) error {
		constraint, table := m.GuessConstraintInterfaceAndTable(stmt, name)
		if constraint != nil {
			name = constraint.GetName()
		}

		return m.recreateTable(value, &table,
			func(ddl *ddl, stmt *gorm.Statement) (*ddl, []interface{}, error) {
				ddl.removeConstraint(name)
				return ddl, nil, nil
			})
	})
}

func (m Migrator) HasConstraint(value interface{}, name string) bool {
	var count int64
	m.RunWithValue(value, func(stmt *gorm.Statement) error {
		constraint, table := m.GuessConstraintInterfaceAndTable(stmt, name)
		if constraint != nil {
			name = constraint.GetName()
		}

		m.DB.Raw(
			"SELECT count(*) FROM sqlite_master WHERE type = ? AND tbl_name = ? AND (sql LIKE ? OR sql LIKE ? OR sql LIKE ? OR sql LIKE ? OR sql LIKE ?)",
			"table", table, `%CONSTRAINT "`+name+`" %`, `%CONSTRAINT `+name+` %`, "%CONSTRAINT `"+name+"`%", "%CONSTRAINT ["+name+"]%", "%CONSTRAINT \t"+name+"\t%",
		).Row().Scan(&count)

		return nil
	})

	return count > 0
}

func (m Migrator) CurrentDatabase() (name string) {
	var null interface{}
	m.DB.Raw("PRAGMA database_list").Row().Scan(&null, &name, &null)
	return
}

func (m Migrator) BuildIndexOptions(opts []schema.IndexOption, stmt *gorm.Statement) (results []interface{}) {
	for _, opt := range opts {
		str := stmt.Quote(opt.DBName)
		if opt.Expression != "" {
			str = opt.Expression
		}

		if opt.Collate != "" {
			str += " COLLATE " + opt.Collate
		}

		if opt.Sort != "" {
			str += " " + opt.Sort
		}
		results = append(results, clause.Expr{SQL: str})
	}
	return
}

func (m Migrator) CreateIndex(value interface{}, name string) error {
	return m.RunWithValue(value, func(stmt *gorm.Statement) error {
		if stmt.Schema != nil {
			if idx := stmt.Schema.LookIndex(name); idx != nil {
				opts := m.BuildIndexOptions(idx.Fields, stmt)
				values := []interface{}{clause.Column{Name: idx.Name}, clause.Table{Name: stmt.Table}, opts}

				createIndexSQL := "CREATE "
				if idx.Class != "" {
					createIndexSQL += idx.Class + " "
				}
				createIndexSQL += "INDEX ?"

				if idx.Type != "" {
					createIndexSQL += " USING " + idx.Type
				}
				createIndexSQL += " ON ??"

				if idx.Where != "" {
					createIndexSQL += " WHERE " + idx.Where
				}

				return m.DB.Exec(createIndexSQL, values...).Error
			}
		}
		return fmt.Errorf("failed to create index with name %v", name)
	})
}

func (m Migrator) HasIndex(value interface{}, name string) bool {
	var count int
	m.RunWithValue(value, func(stmt *gorm.Statement) error {
		if stmt.Schema != nil {
			if idx := stmt.Schema.LookIndex(name); idx != nil {
				name = idx.Name
			}
		}

		if name != "" {
			m.DB.Raw(
				"SELECT count(*) FROM sqlite_master WHERE type = ? AND tbl_name = ? AND name = ?", "index", stmt.Table, name,
			).Row().Scan(&count)
		}
		return nil
	})
	return count > 0
}

func (m Migrator) RenameIndex(value interface{}, oldName, newName string) error {
	return m.RunWithValue(value, func(stmt *gorm.Statement) error {
		var sql string
		m.DB.Raw("SELECT sql FROM sqlite_master WHERE type = ? AND tbl_name = ? AND name = ?", "index", stmt.Table, oldName).Row().Scan(&sql)
		if sql != "" {
			if err := m.DropIndex(value, oldName); err != nil {
				return err
			}
			return m.DB.Exec(strings.Replace(sql, oldName, newName, 1)).Error
		}
		return fmt.Errorf("failed to find index with name %v", oldName)
	})
}

func (m Migrator) DropIndex(value interface{}, name string) error {
	return m.RunWithValue(value, func(stmt *gorm.Statement) error {
		if stmt.Schema != nil {
			if idx := stmt.Schema.LookIndex(name); idx != nil {
				name = idx.Name
			}
		}

		return m.DB.Exec("DROP INDEX ?", clause.Column{Name: name}).Error
	})
}

type Index struct {
	Seq     int
	Name    string
	Unique  bool
	Origin  string
	Partial bool
}

// GetIndexes return Indexes []gorm.Index and execErr error,
// See the [doc]
//
// [doc]: https://www.sqlite.org/pragma.html#pragma_index_list
func (m Migrator) GetIndexes(value interface{}) ([]gorm.Index, error) {
	indexes := make([]gorm.Index, 0)
	err := m.RunWithValue(value, func(stmt *gorm.Statement) error {
		rst := make([]*Index, 0)
		if err := m.DB.Debug().Raw("SELECT * FROM PRAGMA_index_list(?)", stmt.Table).Scan(&rst).Error; err != nil { // alias `PRAGMA index_list(?)`
			return err
		}
		for _, index := range rst {
			if index.Origin == "u" { // skip the index was created by a UNIQUE constraint
				continue
			}
			var columns []string
			if err := m.DB.Raw("SELECT name FROM PRAGMA_index_info(?)", index.Name).Scan(&columns).Error; err != nil { // alias `PRAGMA index_info(?)`
				return err
			}
			indexes = append(indexes, &migrator.Index{
				TableName:       stmt.Table,
				NameValue:       index.Name,
				ColumnList:      columns,
				PrimaryKeyValue: sql.NullBool{Bool: index.Origin == "pk", Valid: true}, // The exceptions are INTEGER PRIMARY KEY
				UniqueValue:     sql.NullBool{Bool: index.Unique, Valid: true},
			})
		}
		return nil
	})
	return indexes, err
}

func (m Migrator) getRawDDL(table string) (string, error) {
	var createSQL string
	m.DB.Raw("SELECT sql FROM sqlite_master WHERE type = ? AND tbl_name = ? AND name = ?", "table", table, table).Row().Scan(&createSQL)

	if m.DB.Error != nil {
		return "", m.DB.Error
	}
	return createSQL, nil
}

func (m Migrator) recreateTable(
	value interface{}, tablePtr *string,
	getCreateSQL func(ddl *ddl, stmt *gorm.Statement) (sql *ddl, sqlArgs []interface{}, err error),
) error {
	return m.RunWithValue(value, func(stmt *gorm.Statement) error {
		table := stmt.Table
		if tablePtr != nil {
			table = *tablePtr
		}

		rawDDL, err := m.getRawDDL(table)
		if err != nil {
			return err
		}

		originDDL, err := parseDDL(rawDDL)
		if err != nil {
			return err
		}

		createDDL, sqlArgs, err := getCreateSQL(originDDL.clone(), stmt)
		if err != nil {
			return err
		}
		if createDDL == nil {
			return nil
		}

		newTableName := table + "__temp"
		if err := createDDL.renameTable(newTableName, table); err != nil {
			return err
		}

		columns := createDDL.getColumns()
		createSQL := createDDL.compile()

		return m.DB.Transaction(func(tx *gorm.DB) error {
			if err := tx.Exec(createSQL, sqlArgs...).Error; err != nil {
				return err
			}

			queries := []string{
				fmt.Sprintf("INSERT INTO `%v`(%v) SELECT %v FROM `%v`", newTableName, strings.Join(columns, ","), strings.Join(columns, ","), table),
				fmt.Sprintf("DROP TABLE `%v`", table),
				fmt.Sprintf("ALTER TABLE `%v` RENAME TO `%v`", newTableName, table),
			}
			for _, query := range queries {
				if err := tx.Exec(query).Error; err != nil {
					return err
				}
			}
			return nil
		})
	})
}
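Not part of the vendored file: a minimal sketch of how this migrator is exercised through the standard `gorm.Migrator` interface. Operations SQLite cannot express as `ALTER TABLE` (altering or dropping columns, constraints) go through `recreateTable`, which creates a temporary table, copies the rows, drops the original, and renames:

```go
package main

import (
	"fmt"
	"log"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type Account struct {
	ID   uint
	Name string `gorm:"index"`
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// AutoMigrate and the Migrator() accessor route through the sqlite
	// Migrator defined above.
	if err := db.AutoMigrate(&Account{}); err != nil {
		log.Fatal(err)
	}

	fmt.Println(db.Migrator().HasTable(&Account{}))          // true
	fmt.Println(db.Migrator().HasColumn(&Account{}, "Name")) // true
	// "idx_accounts_name" assumes GORM's default index naming convention.
	fmt.Println(db.Migrator().HasIndex(&Account{}, "idx_accounts_name"))
}
```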
270
vendor/gorm.io/driver/sqlite/sqlite.go
generated
vendored
Normal file
@@ -0,0 +1,270 @@
package sqlite

import (
	"context"
	"database/sql"
	"strconv"

	"gorm.io/gorm/callbacks"

	_ "github.com/mattn/go-sqlite3"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
	"gorm.io/gorm/logger"
	"gorm.io/gorm/migrator"
	"gorm.io/gorm/schema"
)

// DriverName is the default driver name for SQLite.
const DriverName = "sqlite3"

type Dialector struct {
	DriverName string
	DSN        string
	Conn       gorm.ConnPool
}

type Config struct {
	DriverName string
	DSN        string
	Conn       gorm.ConnPool
}

func Open(dsn string) gorm.Dialector {
	return &Dialector{DSN: dsn}
}

func New(config Config) gorm.Dialector {
	return &Dialector{DSN: config.DSN, DriverName: config.DriverName, Conn: config.Conn}
}

func (dialector Dialector) Name() string {
	return "sqlite"
}

func (dialector Dialector) Initialize(db *gorm.DB) (err error) {
	if dialector.DriverName == "" {
		dialector.DriverName = DriverName
	}

	if dialector.Conn != nil {
		db.ConnPool = dialector.Conn
	} else {
		conn, err := sql.Open(dialector.DriverName, dialector.DSN)
		if err != nil {
			return err
		}
		db.ConnPool = conn
	}

	var version string
	if err := db.ConnPool.QueryRowContext(context.Background(), "select sqlite_version()").Scan(&version); err != nil {
		return err
	}
	// https://www.sqlite.org/releaselog/3_35_0.html
	if compareVersion(version, "3.35.0") >= 0 {
		callbacks.RegisterDefaultCallbacks(db, &callbacks.Config{
			CreateClauses:        []string{"INSERT", "VALUES", "ON CONFLICT", "RETURNING"},
			UpdateClauses:        []string{"UPDATE", "SET", "FROM", "WHERE", "RETURNING"},
			DeleteClauses:        []string{"DELETE", "FROM", "WHERE", "RETURNING"},
			LastInsertIDReversed: true,
		})
	} else {
		callbacks.RegisterDefaultCallbacks(db, &callbacks.Config{
			LastInsertIDReversed: true,
		})
	}

	for k, v := range dialector.ClauseBuilders() {
		if _, ok := db.ClauseBuilders[k]; !ok {
			db.ClauseBuilders[k] = v
		}
	}
	return
}

func (dialector Dialector) ClauseBuilders() map[string]clause.ClauseBuilder {
	return map[string]clause.ClauseBuilder{
		"INSERT": func(c clause.Clause, builder clause.Builder) {
			if insert, ok := c.Expression.(clause.Insert); ok {
				if stmt, ok := builder.(*gorm.Statement); ok {
					stmt.WriteString("INSERT ")
					if insert.Modifier != "" {
						stmt.WriteString(insert.Modifier)
						stmt.WriteByte(' ')
					}

					stmt.WriteString("INTO ")
					if insert.Table.Name == "" {
						stmt.WriteQuoted(stmt.Table)
					} else {
						stmt.WriteQuoted(insert.Table)
					}
					return
				}
			}

			c.Build(builder)
		},
		"LIMIT": func(c clause.Clause, builder clause.Builder) {
			if limit, ok := c.Expression.(clause.Limit); ok {
				var lmt = -1
				if limit.Limit != nil && *limit.Limit >= 0 {
					lmt = *limit.Limit
				}
				if lmt >= 0 || limit.Offset > 0 {
					builder.WriteString("LIMIT ")
					builder.WriteString(strconv.Itoa(lmt))
				}
				if limit.Offset > 0 {
					builder.WriteString(" OFFSET ")
					builder.WriteString(strconv.Itoa(limit.Offset))
				}
			}
		},
		"FOR": func(c clause.Clause, builder clause.Builder) {
			if _, ok := c.Expression.(clause.Locking); ok {
				// SQLite3 does not support row-level locking.
				return
			}
			c.Build(builder)
		},
	}
}

func (dialector Dialector) DefaultValueOf(field *schema.Field) clause.Expression {
	if field.AutoIncrement {
		return clause.Expr{SQL: "NULL"}
	}

	// doesn't work, will raise error
	return clause.Expr{SQL: "DEFAULT"}
}

func (dialector Dialector) Migrator(db *gorm.DB) gorm.Migrator {
	return Migrator{migrator.Migrator{Config: migrator.Config{
		DB:                          db,
		Dialector:                   dialector,
		CreateIndexAfterCreateTable: true,
	}}}
}

func (dialector Dialector) BindVarTo(writer clause.Writer, stmt *gorm.Statement, v interface{}) {
	writer.WriteByte('?')
}

func (dialector Dialector) QuoteTo(writer clause.Writer, str string) {
	var (
		underQuoted, selfQuoted bool
		continuousBacktick      int8
		shiftDelimiter          int8
	)

	for _, v := range []byte(str) {
		switch v {
		case '`':
			continuousBacktick++
			if continuousBacktick == 2 {
				writer.WriteString("``")
				continuousBacktick = 0
			}
		case '.':
			if continuousBacktick > 0 || !selfQuoted {
				shiftDelimiter = 0
				underQuoted = false
				continuousBacktick = 0
				writer.WriteString("`")
			}
			writer.WriteByte(v)
			continue
		default:
			if shiftDelimiter-continuousBacktick <= 0 && !underQuoted {
				writer.WriteString("`")
				underQuoted = true
				if selfQuoted = continuousBacktick > 0; selfQuoted {
					continuousBacktick -= 1
				}
			}

			for ; continuousBacktick > 0; continuousBacktick -= 1 {
				writer.WriteString("``")
			}

			writer.WriteByte(v)
		}
		shiftDelimiter++
	}

	if continuousBacktick > 0 && !selfQuoted {
		writer.WriteString("``")
	}
	writer.WriteString("`")
}

func (dialector Dialector) Explain(sql string, vars ...interface{}) string {
	return logger.ExplainSQL(sql, nil, `"`, vars...)
}

func (dialector Dialector) DataTypeOf(field *schema.Field) string {
	switch field.DataType {
	case schema.Bool:
		return "numeric"
	case schema.Int, schema.Uint:
		if field.AutoIncrement {
			// doesn't check `PrimaryKey`, to keep backward compatibility
			// https://www.sqlite.org/autoinc.html
			return "integer PRIMARY KEY AUTOINCREMENT"
		} else {
			return "integer"
		}
	case schema.Float:
		return "real"
	case schema.String:
		return "text"
	case schema.Time:
		// Distinguish between schema.Time and tag time
		if val, ok := field.TagSettings["TYPE"]; ok {
			return val
		} else {
			return "datetime"
		}
	case schema.Bytes:
		return "blob"
	}

	return string(field.DataType)
}

func (dialectopr Dialector) SavePoint(tx *gorm.DB, name string) error {
	tx.Exec("SAVEPOINT " + name)
	return nil
}

func (dialectopr Dialector) RollbackTo(tx *gorm.DB, name string) error {
	tx.Exec("ROLLBACK TO SAVEPOINT " + name)
	return nil
}

func compareVersion(version1, version2 string) int {
	n, m := len(version1), len(version2)
	i, j := 0, 0
	for i < n || j < m {
		x := 0
		for ; i < n && version1[i] != '.'; i++ {
			x = x*10 + int(version1[i]-'0')
		}
		i++
		y := 0
		for ; j < m && version2[j] != '.'; j++ {
			y = y*10 + int(version2[j]-'0')
		}
		j++
		if x > y {
			return 1
		}
		if x < y {
			return -1
		}
	}
	return 0
}
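Not part of the vendored file: a minimal sketch of opening a database with this dialector. `Open` uses the registered `sqlite3` driver (mattn/go-sqlite3, which requires CGO at build time), while `New` lets callers override the driver name or supply an existing connection pool:

```go
package main

import (
	"log"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

func main() {
	// File-backed database; ":memory:" would give an in-memory one.
	db, err := gorm.Open(sqlite.Open("app.db"), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}

	// Alternative: supply a custom driver name or reuse an existing pool.
	// gorm.Open(sqlite.New(sqlite.Config{DriverName: "sqlite3", DSN: "file::memory:?cache=shared"}), &gorm.Config{})

	_ = db
}
```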