commit e2dd29259f07b7d0e7865f1b8b785e745b6cde1c Author: Patrick Nagurny Date: Fri Oct 19 15:31:41 2018 -0400 initial commit diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..bc300bf --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +config.json +*.crt +*.key +*.csr +*.sublime-project +*.sublime-workspace diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..bed91b5 --- /dev/null +++ b/LICENSE @@ -0,0 +1,7 @@ +Copyright 2018 Open Accounting, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/apidoc.json b/apidoc.json new file mode 100644 index 0000000..597c8bf --- /dev/null +++ b/apidoc.json @@ -0,0 +1,7 @@ +{ + "name": "OpenAccounting", + "version": "1.0.0", + "description": "Open Accounting API documentation", + "title": "Open Accounting API documentation", + "url" : "https://openaccounting.io/api" +} \ No newline at end of file diff --git a/config.json.sample b/config.json.sample new file mode 100644 index 0000000..1402ca7 --- /dev/null +++ b/config.json.sample @@ -0,0 +1,12 @@ +{ + "WebUrl": "https://domain.com", + "Port": 8080, + "KeyFile": "", + "CertFile": "", + "Database": "openaccounting", + "User": "openaccounting", + "Password": "openaccounting", + "SendgridKey": "", + "SendgridEmail": "noreply@domain.com", + "SendgridSender": "Sender" +} \ No newline at end of file diff --git a/core/api/account.go b/core/api/account.go new file mode 100644 index 0000000..4d527d8 --- /dev/null +++ b/core/api/account.go @@ -0,0 +1,302 @@ +package api + +import ( + "encoding/json" + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/model" + "github.com/openaccounting/oa-server/core/model/types" + "io/ioutil" + "net/http" + "strconv" + "time" +) + +/** + * @api {get} /orgs/:orgId/accounts Get Accounts by Org id + * @apiVersion 1.0.0 + * @apiName GetOrgAccounts + * @apiGroup Account + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccess {String} id Id of the Account. + * @apiSuccess {String} orgId Id of the Org. + * @apiSuccess {Date} inserted Date Account was created + * @apiSuccess {Date} updated Date Account was updated + * @apiSuccess {String} name Name of the Account. + * @apiSuccess {String} parent Id of the parent Account. + * @apiSuccess {String} currency Three letter currency code. + * @apiSuccess {Number} precision How many digits the currency goes out to. + * @apiSuccess {Boolean} debitBalance True if Account has a debit balance. 
+ * @apiSuccess {Number} balance Current Account balance in this Account's currency + * @apiSuccess {Number} nativeBalance Current Account balance in the Org's currency + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * [ + * { + * "id": "22222222222222222222222222222222", + * "orgId": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "name": "Cash", + * "parent": "11111111111111111111111111111111", + * "currency": "USD", + * "precision": 2, + * "debitBalance": true, + * "balance": 10000, + * "nativeBalance": 10000 + * } + * ] + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func GetOrgAccounts(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + // TODO how do we make date an optional parameter + // instead of resorting to this hack? + date := time.Date(2100, time.January, 1, 0, 0, 0, 0, time.UTC) + + dateParam := r.URL.Query().Get("date") + + if dateParam != "" { + dateParamNumeric, err := strconv.ParseInt(dateParam, 10, 64) + + if err != nil { + rest.Error(w, "invalid date", 400) + return + } + date = time.Unix(0, dateParamNumeric*1000000) + } + + accounts, err := model.Instance.GetAccountsWithBalances(orgId, user.Id, "", date) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&accounts) +} + +/** + * @api {post} /orgs/:orgId/accounts Create a new Account + * @apiVersion 1.0.0 + * @apiName PostAccount + * @apiGroup Account + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} id Id 32 character hex string + * @apiParam {String} name Name of the Account. + * @apiParam {String} parent Id of the parent Account. + * @apiParam {String} currency Three letter currency code. + * @apiParam {Number} precision How many digits the currency goes out to. + * @apiParam {Boolean} debitBalance True if account has a debit balance. + * @apiParam {Number} balance Current Account balance in this Account's currency + * @apiParam {Number} nativeBalance Current Account balance in the Org's currency + * + * @apiSuccess {String} id Id of the Account. + * @apiSuccess {String} orgId Id of the Org. + * @apiSuccess {Date} inserted Date Account was created + * @apiSuccess {Date} updated Date Account was updated + * @apiSuccess {String} name Name of the Account. + * @apiSuccess {String} parent Id of the parent Account. + * @apiSuccess {String} currency Three letter currency code. + * @apiSuccess {Number} precision How many digits the currency goes out to. + * @apiSuccess {Boolean} debitBalance True if account has a debit balance. 
+ * @apiSuccess {Number} balance Current Account balance in this Account's currency + * @apiSuccess {Number} nativeBalance Current Account balance in the Org's currency + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "22222222222222222222222222222222", + * "orgId": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "name": "Cash", + * "parent": "11111111111111111111111111111111", + * "currency": "USD", + * "precision": 2, + * "debitBalance": true, + * "balance": 10000, + * "nativeBalance": 10000 + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PostAccount(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + content, err := ioutil.ReadAll(r.Body) + r.Body.Close() + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if len(content) == 0 { + rest.Error(w, "JSON payload is empty", http.StatusInternalServerError) + return + } + + account := types.NewAccount() + + err = json.Unmarshal(content, &account) + + if err != nil { + // Maybe it's an array of accounts? + PostAccounts(w, r, content) + return + } + + account.OrgId = orgId + err = model.Instance.CreateAccount(account, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&account) +} + +func PostAccounts(w rest.ResponseWriter, r *rest.Request, content []byte) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + accounts := make([]*types.Account, 0) + + err := json.Unmarshal(content, &accounts) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + for _, account := range accounts { + account.OrgId = orgId + err = model.Instance.CreateAccount(account, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } + + w.WriteJson(accounts) +} + +/** + * @api {put} /orgs/:orgId/accounts/:accountId Modify an Account + * @apiVersion 1.0.0 + * @apiName PutAccount + * @apiGroup Account + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} id Id 32 character hex string + * @apiParam {String} name Name of the Account. + * @apiParam {String} parent Id of the parent Account. + * @apiParam {String} currency Three letter currency code. + * @apiParam {Number} precision How many digits the currency goes out to. + * @apiParam {Boolean} debitBalance True if Account has a debit balance. + * @apiParam {Number} balance Current Account balance in this Account's currency + * @apiParam {Number} nativeBalance Current Account balance in the Org's currency + * + * @apiSuccess {String} id Id of the Account. + * @apiSuccess {String} orgId Id of the Org. + * @apiSuccess {Date} inserted Date Account was created + * @apiSuccess {Date} updated Date Account was updated + * @apiSuccess {String} name Name of the Account. + * @apiSuccess {String} parent Id of the parent Account. + * @apiSuccess {String} currency Three letter currency code. + * @apiSuccess {Number} precision How many digits the currency goes out to. + * @apiSuccess {Boolean} debitBalance True if Account has a debit balance. 
+ * @apiSuccess {Number} balance Current Account balance in this Account's currency + * @apiSuccess {Number} nativeBalance Current Account balance in the Org's currency + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "22222222222222222222222222222222", + * "orgId": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "name": "Cash", + * "parent": "11111111111111111111111111111111", + * "currency": "USD", + * "precision": 2, + * "debitBalance": true, + * "balance": 10000, + * "nativeBalance": 10000 + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PutAccount(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + accountId := r.PathParam("accountId") + + account := types.Account{} + err := r.DecodeJsonPayload(&account) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + account.Id = accountId + account.OrgId = orgId + + err = model.Instance.UpdateAccount(&account, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&account) +} + +/** + * @api {delete} /orgs/:orgId/accounts/:accountId Delete an Account + * @apiVersion 1.0.0 + * @apiName DeleteAccount + * @apiGroup Account + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func DeleteAccount(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + accountId := r.PathParam("accountId") + + err := model.Instance.DeleteAccount(accountId, user.Id, orgId) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} diff --git a/core/api/api.go b/core/api/api.go new file mode 100644 index 0000000..791a851 --- /dev/null +++ b/core/api/api.go @@ -0,0 +1,77 @@ +package api + +import ( + "github.com/ant0ine/go-json-rest/rest" +) + +/** + * @apiDefine NotAuthorizedError + * + * @apiError NotAuthorized API request does not have proper credentials + * + * @apiErrorExample Error-Response: + * HTTP/1.1 403 Not Authorized + */ + +/** + * @apiDefine InternalServerError + * + * @apiError InternalServer An internal error occurred + * + * @apiErrorExample Error-Response: + * HTTP/1.1 500 Internal Server Error + * { + * "error": "id required" + * } + * + */ + +func Init() (*rest.Api, error) { + rest.ErrorFieldName = "error" + app := rest.NewApi() + + logger := &LoggerMiddleware{} + + var stack = []rest.Middleware{ + logger, + &rest.RecorderMiddleware{}, + &rest.TimerMiddleware{}, + &rest.PoweredByMiddleware{}, + &rest.RecoverMiddleware{}, + &rest.GzipMiddleware{}, + &rest.ContentTypeCheckerMiddleware{}, + } + + app.Use(stack...) 
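	// LoggerMiddleware is placed first so that it wraps RecorderMiddleware and
	// TimerMiddleware; by the time its log line runs, r.Env["STATUS_CODE"],
	// r.Env["BYTES_WRITTEN"] and r.Env["START_TIME"] have been populated
	// (see core/api/logger.go). The CORS, auth and version middlewares are
	// added separately below, and auth is also handed to GetRouter so that
	// individual routes can opt in via RequireAuth.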
+ + app.Use(&rest.CorsMiddleware{ + RejectNonCorsRequests: false, + OriginValidator: func(origin string, request *rest.Request) bool { + //return origin == "http://localhost:4200" + return true + }, + AllowedMethods: []string{"GET", "POST", "PUT", "DELETE"}, + AllowedHeaders: []string{ + "Accept", "Content-Type", "X-Custom-Header", "Origin", "Authorization", "Accept-Version"}, + AccessControlAllowCredentials: true, + AccessControlMaxAge: 3600, + }) + + auth := &AuthMiddleware{ + Realm: "openaccounting", + } + + version := &VersionMiddleware{} + + app.Use(auth) + app.Use(version) + + router, err := GetRouter(auth) + if err != nil { + return nil, err + } + + app.SetApp(router) + + return app, nil +} diff --git a/core/api/apikey.go b/core/api/apikey.go new file mode 100644 index 0000000..3f25bcd --- /dev/null +++ b/core/api/apikey.go @@ -0,0 +1,188 @@ +package api + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/model" + "github.com/openaccounting/oa-server/core/model/types" + "net/http" +) + +/** + * @api {get} /apikeys Get API keys + * @apiVersion 1.0.0 + * @apiName GetApiKeys + * @apiGroup ApiKey + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccess {String} id Id of the ApiKey. + * @apiSuccess {Date} inserted Date ApiKey was created + * @apiSuccess {Date} updated Date Last activity for the ApiKey + * @apiSuccess {String} userId Id of the User + * @apiSuccess {String} label Label + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * [ + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "userId": "22222222222222222222222222222222", + * "label": "Shopping Cart" + * } + * ] + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func GetApiKeys(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + + keys, err := model.Instance.GetApiKeys(user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(keys) +} + +/** + * @api {post} /apikeys Create a new API key + * @apiVersion 1.0.0 + * @apiName PostApiKey + * @apiGroup ApiKey + * + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * @apiHeader {String} Authorization HTTP Basic Auth + * + * @apiParam {String} id 32 character hex string + * @apiParam {String} label Label + * + * @apiSuccess {String} id Id of the ApiKey. 
+ * @apiSuccess {Date} inserted Date ApiKey was created + * @apiSuccess {Date} updated Date Last activity for the ApiKey + * @apiSuccess {String} userId Id of the User + * @apiSuccess {String} label Label + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "userId": "22222222222222222222222222222222", + * "label": "Shopping Cart" + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PostApiKey(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + key := &types.ApiKey{} + + err := r.DecodeJsonPayload(key) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + key.UserId = user.Id + + err = model.Instance.CreateApiKey(key) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(key) +} + +/** + * @api {put} /apikeys Modify an API key + * @apiVersion 1.0.0 + * @apiName PutApiKey + * @apiGroup ApiKey + * + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * @apiHeader {String} Authorization HTTP Basic Auth + * + * @apiParam {String} id 32 character hex string + * @apiParam {String} label Label + * + * @apiSuccess {String} id Id of the ApiKey. + * @apiSuccess {Date} inserted Date ApiKey was created + * @apiSuccess {Date} updated Date Last activity for the ApiKey + * @apiSuccess {String} userId Id of the User + * @apiSuccess {String} label Label + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "userId": "22222222222222222222222222222222", + * "label": "Shopping Cart" + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PutApiKey(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + key := &types.ApiKey{} + keyId := r.PathParam("apiKeyId") + + err := r.DecodeJsonPayload(key) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + key.Id = keyId + key.UserId = user.Id + + err = model.Instance.UpdateApiKey(key) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(key) +} + +/** + * @api {delete} /apikeys/:apiKeyId Delete an API key + * @apiVersion 1.0.0 + * @apiName DeleteApiKey + * @apiGroup ApiKey + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func DeleteApiKey(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + id := r.PathParam("apiKeyId") + + err := model.Instance.DeleteApiKey(id, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} diff --git a/core/api/auth.go b/core/api/auth.go new file mode 100644 index 0000000..b459d81 --- /dev/null +++ b/core/api/auth.go @@ -0,0 +1,93 @@ +package api + +import ( + "encoding/base64" + "errors" + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/auth" + "log" + "net/http" + "strings" +) + +type AuthMiddleware struct { + + // Realm name to display to the user. Required. 
+ Realm string +} + +// MiddlewareFunc makes AuthMiddleware implement the Middleware interface. +func (mw *AuthMiddleware) MiddlewareFunc(handler rest.HandlerFunc) rest.HandlerFunc { + + if mw.Realm == "" { + log.Fatal("Realm is required") + } + + return func(writer rest.ResponseWriter, request *rest.Request) { + + authHeader := request.Header.Get("Authorization") + if authHeader == "" { + request.Env["USER"] = nil + handler(writer, request) + return + } + + emailOrKey, password, err := mw.decodeBasicAuthHeader(authHeader) + + if err != nil { + rest.Error(writer, "Invalid authentication", http.StatusBadRequest) + return + } + + // authenticate via session, apikey or user + user, err := auth.Instance.Authenticate(emailOrKey, password) + + if err == nil { + request.Env["USER"] = user + handler(writer, request) + return + } + + log.Println("Unauthorized " + emailOrKey) + + mw.unauthorized(writer) + return + } +} + +func (mw *AuthMiddleware) unauthorized(writer rest.ResponseWriter) { + writer.Header().Set("WWW-Authenticate", "Basic realm="+mw.Realm) + rest.Error(writer, "Not Authorized", http.StatusUnauthorized) +} + +func (mw *AuthMiddleware) decodeBasicAuthHeader(header string) (user string, password string, err error) { + + parts := strings.SplitN(header, " ", 2) + if !(len(parts) == 2 && parts[0] == "Basic") { + return "", "", errors.New("Invalid authentication") + } + + decoded, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return "", "", errors.New("Invalid base64") + } + + creds := strings.SplitN(string(decoded), ":", 2) + if len(creds) != 2 { + return "", "", errors.New("Invalid authentication") + } + + return creds[0], creds[1], nil +} + +func (mw *AuthMiddleware) RequireAuth(handler rest.HandlerFunc) rest.HandlerFunc { + return func(writer rest.ResponseWriter, request *rest.Request) { + + if request.Env["USER"] == nil { + mw.unauthorized(writer) + return + } + + handler(writer, request) + } +} diff --git a/core/api/logger.go b/core/api/logger.go new file mode 100644 index 0000000..43921de --- /dev/null +++ b/core/api/logger.go @@ -0,0 +1,89 @@ +package api + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/model/types" + "log" + "net" + "os" + "strconv" + "time" +) + +type LoggerMiddleware struct { + Logger *log.Logger +} + +func (mw *LoggerMiddleware) MiddlewareFunc(h rest.HandlerFunc) rest.HandlerFunc { + + // set the default Logger + if mw.Logger == nil { + mw.Logger = log.New(os.Stderr, "", 0) + } + + return func(w rest.ResponseWriter, r *rest.Request) { + h(w, r) + + message := getIp(r) + + message = message + " " + getUser(r) + message = message + " " + getTime(r) + message = message + " " + getRequest(r) + message = message + " " + getStatus(r) + message = message + " " + getBytes(r) + message = message + " " + getUserAgent(r) + + mw.Logger.Print(message) + } +} + +func getIp(r *rest.Request) string { + remoteAddr := r.RemoteAddr + if remoteAddr != "" { + if ip, _, err := net.SplitHostPort(remoteAddr); err == nil { + return ip + } + } + return "" +} + +func getUser(r *rest.Request) string { + if r.Env["USER"] != nil { + user := r.Env["USER"].(*types.User) + return user.Email + } + + return "-" +} + +func getTime(r *rest.Request) string { + if r.Env["START_TIME"] != nil { + return r.Env["START_TIME"].(*time.Time).Format("02/Jan/2006:15:04:05 -0700") + } + return "-" +} + +func getRequest(r *rest.Request) string { + return r.Method + " " + r.URL.RequestURI() +} + +func getStatus(r *rest.Request) string { + if 
r.Env["STATUS_CODE"] != nil { + return strconv.Itoa(r.Env["STATUS_CODE"].(int)) + } + return "-" +} + +func getBytes(r *rest.Request) string { + if r.Env["BYTES_WRITTEN"] != nil { + return strconv.FormatInt(r.Env["BYTES_WRITTEN"].(int64), 10) + } + return "-" +} + +func getUserAgent(r *rest.Request) string { + if r.UserAgent() != "" { + return r.UserAgent() + } + return "-" +} diff --git a/core/api/org.go b/core/api/org.go new file mode 100644 index 0000000..f4f09ee --- /dev/null +++ b/core/api/org.go @@ -0,0 +1,394 @@ +package api + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/model" + "github.com/openaccounting/oa-server/core/model/types" + "net/http" +) + +/** + * @api {get} /org/:orgId Get Org by id + * @apiVersion 1.0.0 + * @apiName GetOrg + * @apiGroup Org + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccess {String} id Id of the Org. + * @apiSuccess {Date} inserted Date Org was created + * @apiSuccess {Date} updated Date Org was updated + * @apiSuccess {String} name Name of the Org. + * @apiSuccess {String} currency Three letter currency code. + * @apiSuccess {Number} precision How many digits the currency goes out to. + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "name": "MyOrg", + * "currency": "USD", + * "precision": 2, + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func GetOrg(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + org, err := model.Instance.GetOrg(orgId, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&org) +} + +/** + * @api {get} /orgs Get a User's Orgs + * @apiVersion 1.0.0 + * @apiName GetOrgs + * @apiGroup Org + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccess {String} id Id of the Org. + * @apiSuccess {Date} inserted Date Org was created + * @apiSuccess {Date} updated Date Org was updated + * @apiSuccess {String} name Name of the Org. + * @apiSuccess {String} currency Three letter currency code. + * @apiSuccess {Number} precision How many digits the currency goes out to. + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * [ + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "name": "MyOrg", + * "currency": "USD", + * "precision": 2, + * } + * ] + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func GetOrgs(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + + orgs, err := model.Instance.GetOrgs(user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&orgs) +} + +/** + * @api {post} /orgs Create a new Org + * @apiVersion 1.0.0 + * @apiName PostOrg + * @apiGroup Org + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} id Id 32 character hex string + * @apiParam {String} name Name of the Org. + * @apiParam {String} currency Three letter currency code. 
+ * @apiParam {Number} precision How many digits the currency goes out to. + * + * @apiSuccess {String} id Id of the Org. + * @apiSuccess {Date} inserted Date Org was created + * @apiSuccess {Date} updated Date Org was updated + * @apiSuccess {String} name Name of the Org. + * @apiSuccess {String} currency Three letter currency code. + * @apiSuccess {Number} precision How many digits the currency goes out to. + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "name": "MyOrg", + * "currency": "USD", + * "precision": 2, + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PostOrg(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + org := types.Org{Precision: 2} + err := r.DecodeJsonPayload(&org) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + err = model.Instance.CreateOrg(&org, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&org) +} + +/** + * @api {put} /orgs/:orgId Modify an Org + * @apiVersion 1.0.0 + * @apiName PutOrg + * @apiGroup Org + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} name Name of the Org. + * + * @apiSuccess {String} id Id of the Org. + * @apiSuccess {Date} inserted Date Org was created + * @apiSuccess {Date} updated Date Org was updated + * @apiSuccess {String} name Name of the Org. + * @apiSuccess {String} currency Three letter currency code. + * @apiSuccess {Number} precision How many digits the currency goes out to. 
+ * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "name": "MyOrg", + * "currency": "USD", + * "precision": 2, + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PutOrg(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + org := types.Org{} + err := r.DecodeJsonPayload(&org) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + org.Id = orgId + + err = model.Instance.UpdateOrg(&org, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&org) +} + +/** + * @api {post} /orgs/:orgId/invites Invite a user to an Org + * @apiVersion 1.0.0 + * @apiName PostInvite + * @apiGroup Org + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} email Email address of user + * + * @apiSuccess {String} id Id of the Invite + * @apiSuccess {orgId} id Id of the Org + * @apiSuccess {Date} inserted Date Invite was created + * @apiSuccess {Date} updated Date Invite was updated/accepted + * @apiSuccess {String} email Email address of user + * @apiSuccess {String} accepted true if user has accepted + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "a1b2c3d4", + * "orgId": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "email": "johndoe@email.com", + * "accepted": false + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PostInvite(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + invite := types.Invite{} + err := r.DecodeJsonPayload(&invite) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + invite.OrgId = orgId + + err = model.Instance.CreateInvite(&invite, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&invite) +} + +/** + * @api {put} /orgs/:orgId/invites/:inviteId Accept an invitation + * @apiVersion 1.0.0 + * @apiName PutInvite + * @apiGroup Org + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} accepted true + * + * @apiSuccess {String} id Id of the Invite + * @apiSuccess {orgId} id Id of the Org + * @apiSuccess {Date} inserted Date Invite was created + * @apiSuccess {Date} updated Date Invite was updated/accepted + * @apiSuccess {String} email Email address of user + * @apiSuccess {String} accepted true if user has accepted + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "a1b2c3d4", + * "orgId": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "email": "johndoe@email.com", + * "accepted": true + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PutInvite(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + //orgId := r.PathParam("orgId") + inviteId := r.PathParam("inviteId") + + invite := types.Invite{} + err := r.DecodeJsonPayload(&invite) + + if err != nil { + rest.Error(w, 
err.Error(), http.StatusInternalServerError) + return + } + + invite.Id = inviteId + + err = model.Instance.AcceptInvite(&invite, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&invite) +} + +/** + * @api {get} /orgs/:orgId/invites Get Org invites + * @apiVersion 1.0.0 + * @apiName GetInvites + * @apiGroup Org + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccess {String} id Id of the Invite + * @apiSuccess {orgId} id Id of the Org + * @apiSuccess {Date} inserted Date Invite was created + * @apiSuccess {Date} updated Date Invite was updated/accepted + * @apiSuccess {String} email Email address of user + * @apiSuccess {String} accepted true if user has accepted + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * [ + * { + * "id": "a1b2c3d4", + * "orgId": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "email": "johndoe@email.com", + * "accepted": true + * } + * ] + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func GetInvites(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + invites, err := model.Instance.GetInvites(orgId, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&invites) +} + +/** + * @api {delete} /orgs/:orgId/invites/:inviteId Delete Invite + * @apiVersion 1.0.0 + * @apiName DeleteInvite + * @apiGroup Org + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func DeleteInvite(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + inviteId := r.PathParam("inviteId") + + err := model.Instance.DeleteInvite(inviteId, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} diff --git a/core/api/price.go b/core/api/price.go new file mode 100644 index 0000000..6fad175 --- /dev/null +++ b/core/api/price.go @@ -0,0 +1,183 @@ +package api + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/model" + "github.com/openaccounting/oa-server/core/model/types" + "net/http" + "strconv" + "time" +) + +/** + * @api {get} /org/:orgId/prices Get prices nearest in time or by currency + * @apiVersion 1.0.0 + * @apiName GetPrices + * @apiGroup Price + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {Number} nearestDate Milliseconds since epoch + * @apiParam {String} currency Currency code + * + * @apiSuccess {String} id Id of the Price. + * @apiSuccess {String} orgId Id of the Org. + * @apiSuccess {String} currency Currency code. + * @apiSuccess {Date} date Date of the Price. + * @apiSuccess {Date} inserted Date when Price was posted. + * @apiSuccess {Date} updated Date when Price was updated. + * @apiSuccess {Number} price Price of currency measured in native Org currency. 
+ * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * [ + * { + * "id": "11111111111111111111111111111111", + * "orgId": "11111111111111111111111111111111", + * "currency": "EUR", + * "date": "2018-09-11T18:05:04.420Z", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "price": 1.16 + * } + * ] + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func GetPrices(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + // TODO how do we make date an optional parameter + // instead of resorting to this hack? + nearestDate := time.Date(2100, time.January, 1, 0, 0, 0, 0, time.UTC) + + nearestDateParam := r.URL.Query().Get("nearestDate") + currencyParam := r.URL.Query().Get("currency") + + // If currency was specified, get all prices for that currency + if currencyParam != "" { + prices, err := model.Instance.GetPricesByCurrency(orgId, currencyParam, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(prices) + return + } + + if nearestDateParam != "" { + nearestDateParamNumeric, err := strconv.ParseInt(nearestDateParam, 10, 64) + + if err != nil { + rest.Error(w, "invalid date", 400) + return + } + nearestDate = time.Unix(0, nearestDateParamNumeric*1000000) + } + + // Get prices nearest in time + prices, err := model.Instance.GetPricesNearestInTime(orgId, nearestDate, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(prices) +} + +/** + * @api {post} /orgs/:orgId/prices Create a new Price + * @apiVersion 1.0.0 + * @apiName PostPrice + * @apiGroup Price + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} id Id 32 character hex string. + * @apiParam {String} orgId Id of the Org. + * @apiParam {String} currency Currency code. + * @apiParam {Date} date Date of the Price. + * @apiParam {Number} price Price of currency measured in native Org currency. + * + * @apiSuccess {String} id Id of the Price. + * @apiSuccess {String} orgId Id of the Org. + * @apiSuccess {String} currency Currency code. + * @apiSuccess {Date} date Date of the Price. + * @apiSuccess {Date} inserted Date when Price was posted. + * @apiSuccess {Date} updated Date when Price was updated. + * @apiSuccess {Number} price Price of currency measured in native Org currency. 
+ * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "orgId": "11111111111111111111111111111111", + * "currency": "EUR", + * "date": "2018-09-11T18:05:04.420Z", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "price": 1.16 + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PostPrice(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + price := types.Price{} + + err := r.DecodeJsonPayload(&price) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + price.OrgId = orgId + err = model.Instance.CreatePrice(&price, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&price) +} + +/** + * @api {delete} /orgs/:orgId/prices/:priceId Delete a Price + * @apiVersion 1.0.0 + * @apiName DeletePrice + * @apiGroup Price + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func DeletePrice(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + priceId := r.PathParam("priceId") + + err := model.Instance.DeletePrice(priceId, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} diff --git a/core/api/routes.go b/core/api/routes.go new file mode 100644 index 0000000..075d9eb --- /dev/null +++ b/core/api/routes.go @@ -0,0 +1,48 @@ +package api + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/ws" +) + +func GetRouter(auth *AuthMiddleware) (rest.App, error) { + return rest.MakeRouter( + rest.Get("/api/user", auth.RequireAuth(GetUser)), + rest.Put("/api/user", PutUser), + rest.Post("/api/user/verify", VerifyUser), + rest.Post("/api/user/reset-password", ResetPassword), + rest.Post("/api/users", PostUser), + rest.Post("/api/orgs", auth.RequireAuth(PostOrg)), + rest.Get("/api/orgs", auth.RequireAuth(GetOrgs)), + rest.Get("/api/orgs/:orgId", auth.RequireAuth(GetOrg)), + rest.Put("/api/orgs/:orgId", auth.RequireAuth(PutOrg)), + rest.Get("/api/orgs/:orgId/ledgers", auth.RequireAuth(GetOrgAccounts)), + rest.Post("/api/orgs/:orgId/ledgers", auth.RequireAuth(PostAccount)), + rest.Put("/api/orgs/:orgId/ledgers/:accountId", auth.RequireAuth(PutAccount)), + rest.Delete("/api/orgs/:orgId/ledgers/:accountId", auth.RequireAuth(DeleteAccount)), + rest.Get("/api/orgs/:orgId/ledgers/:accountId/transactions", auth.RequireAuth(GetTransactionsByAccount)), + rest.Get("/api/orgs/:orgId/accounts", auth.RequireAuth(GetOrgAccounts)), + rest.Post("/api/orgs/:orgId/accounts", auth.RequireAuth(PostAccount)), + rest.Put("/api/orgs/:orgId/accounts/:accountId", auth.RequireAuth(PutAccount)), + rest.Delete("/api/orgs/:orgId/accounts/:accountId", auth.RequireAuth(DeleteAccount)), + rest.Get("/api/orgs/:orgId/accounts/:accountId/transactions", auth.RequireAuth(GetTransactionsByAccount)), + rest.Get("/api/orgs/:orgId/transactions", auth.RequireAuth(GetTransactionsByOrg)), + rest.Post("/api/orgs/:orgId/transactions", auth.RequireAuth(PostTransaction)), + rest.Put("/api/orgs/:orgId/transactions/:transactionId", auth.RequireAuth(PutTransaction)), + 
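		// Note: the /ledgers routes above register the same handlers as the
		// /accounts routes, so either path works. The /ws route below is not
		// wrapped in auth.RequireAuth and is skipped by the Accept-Version
		// check in VersionMiddleware.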
rest.Delete("/api/orgs/:orgId/transactions/:transactionId", auth.RequireAuth(DeleteTransaction)), + rest.Get("/api/orgs/:orgId/prices", auth.RequireAuth(GetPrices)), + rest.Post("/api/orgs/:orgId/prices", auth.RequireAuth(PostPrice)), + rest.Delete("/api/orgs/:orgId/prices/:priceId", auth.RequireAuth(DeletePrice)), + rest.Get("/ws", ws.Handler), + rest.Post("/api/sessions", auth.RequireAuth(PostSession)), + rest.Delete("/api/sessions/:sessionId", auth.RequireAuth(DeleteSession)), + rest.Get("/api/apikeys", auth.RequireAuth(GetApiKeys)), + rest.Post("/api/apikeys", auth.RequireAuth(PostApiKey)), + rest.Put("/api/apikeys/:apiKeyId", auth.RequireAuth(PutApiKey)), + rest.Delete("/api/apikeys/:apiKeyId", auth.RequireAuth(DeleteApiKey)), + rest.Get("/api/orgs/:orgId/invites", auth.RequireAuth(GetInvites)), + rest.Post("/api/orgs/:orgId/invites", auth.RequireAuth(PostInvite)), + rest.Put("/api/orgs/:orgId/invites/:inviteId", auth.RequireAuth(PutInvite)), + rest.Delete("/api/orgs/:orgId/invites/:inviteId", auth.RequireAuth(DeleteInvite)), + ) +} diff --git a/core/api/session.go b/core/api/session.go new file mode 100644 index 0000000..c1b5b15 --- /dev/null +++ b/core/api/session.go @@ -0,0 +1,87 @@ +package api + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/model" + "github.com/openaccounting/oa-server/core/model/types" + "net/http" +) + +/** + * @api {post} /sessions Create a new Session + * @apiVersion 1.0.0 + * @apiName PostSession + * @apiGroup Session + * + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * @apiHeader {String} Authorization HTTP Basic Auth + * + * @apiParam {String} id 32 character hex string + * + * @apiSuccess {String} id Id of the Session. + * @apiSuccess {Date} inserted Date Session was created + * @apiSuccess {Date} updated Date Last activity for the Session + * @apiSuccess {String} userId Id of the User + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "userId": "22222222222222222222222222222222" + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PostSession(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + session := &types.Session{} + + err := r.DecodeJsonPayload(session) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + session.UserId = user.Id + + err = model.Instance.CreateSession(session) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(session) +} + +/** + * @api {delete} /sessions/:sessionId Log out of a Session + * @apiVersion 1.0.0 + * @apiName DeleteSession + * @apiGroup Session + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func DeleteSession(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + sessionId := r.PathParam("sessionId") + + err := model.Instance.DeleteSession(sessionId, user.Id) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} diff --git a/core/api/transaction.go b/core/api/transaction.go new file mode 100644 index 0000000..a898b2c --- 
/dev/null +++ b/core/api/transaction.go @@ -0,0 +1,362 @@ +package api + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/model" + "github.com/openaccounting/oa-server/core/model/types" + "net/http" +) + +/** + * @api {get} /orgs/:orgId/accounts/:accountId/transactions Get Transactions by Account Id + * @apiVersion 1.0.0 + * @apiName GetAccountTransactions + * @apiGroup Transaction + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccess {String} id Id of the Transaction. + * @apiSuccess {String} orgId Id of the Org. + * @apiSuccess {String} userId Id of the User who created the Transaction. + * @apiSuccess {Date} date Date of the Transaction + * @apiSuccess {Date} inserted Date Transaction was created + * @apiSuccess {Date} updated Date Transaction was updated + * @apiSuccess {String} description Description of Transaction + * @apiSuccess {String} data Extra data field + * @apiSuccess {Object[]} splits Array of Transaction Splits + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * [ + * { + * "id": "11111111111111111111111111111111", + * "orgId": "11111111111111111111111111111111", + * "userId": "11111111111111111111111111111111", + * "date": "2018-06-08T20:12:29.720Z", + * "inserted": "2018-06-08T20:12:29.720Z", + * "updated": "2018-06-08T20:12:29.720Z", + * "description": "Treat friend to lunch", + * "data:": "{\"key\": \"value\"}", + * "splits": [ + * { + * "accountId": "11111111111111111111111111111111", + * "amount": -2000, + * "nativeAmount": -2000 + * }, + * { + * "accountId": "22222222222222222222222222222222", + * "amount": 1000, + * "nativeAmount": 1000 + * }, + * { + * "accountId": "33333333333333333333333333333333", + * "amount": 1000, + * "nativeAmount": 1000 + * } + * ] + * } + * ] + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func GetTransactionsByAccount(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + accountId := r.PathParam("accountId") + + queryOptions, err := types.QueryOptionsFromURLQuery(r.URL.Query()) + + if err != nil { + rest.Error(w, "invalid query options", 400) + return + } + + sTxs, err := model.Instance.GetTransactionsByAccount(orgId, user.Id, accountId, queryOptions) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&sTxs) +} + +/** + * @api {get} /orgs/:orgId/transactions Get Transactions by Org Id + * @apiVersion 1.0.0 + * @apiName GetOrgTransactions + * @apiGroup Transaction + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccess {String} id Id of the Transaction. + * @apiSuccess {String} orgId Id of the Org. + * @apiSuccess {String} userId Id of the User who created the Transaction. 
+ * @apiSuccess {Date} date Date of the Transaction + * @apiSuccess {Date} inserted Date Transaction was created + * @apiSuccess {Date} updated Date Transaction was updated + * @apiSuccess {String} description Description of Transaction + * @apiSuccess {String} data Extra data field + * @apiSuccess {Object[]} splits Array of Transaction Splits + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * [ + * { + * "id": "11111111111111111111111111111111", + * "orgId": "11111111111111111111111111111111", + * "userId": "11111111111111111111111111111111", + * "date": "2018-06-08T20:12:29.720Z", + * "inserted": "2018-06-08T20:12:29.720Z", + * "updated": "2018-06-08T20:12:29.720Z", + * "description": "Treat friend to lunch", + * "data:": "{\"key\": \"value\"}", + * "splits": [ + * { + * "accountId": "11111111111111111111111111111111", + * "amount": -2000, + * "nativeAmount": -2000 + * }, + * { + * "accountId": "22222222222222222222222222222222", + * "amount": 1000, + * "nativeAmount": 1000 + * }, + * { + * "accountId": "33333333333333333333333333333333", + * "amount": 1000, + * "nativeAmount": 1000 + * } + * ] + * } + * ] + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func GetTransactionsByOrg(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + queryOptions, err := types.QueryOptionsFromURLQuery(r.URL.Query()) + + if err != nil { + rest.Error(w, "invalid query options", 400) + return + } + + sTxs, err := model.Instance.GetTransactionsByOrg(orgId, user.Id, queryOptions) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(&sTxs) +} + +/** + * @api {post} /orgs/:orgId/transactions Create a new Transaction + * @apiVersion 1.0.0 + * @apiName PostTransaction + * @apiGroup Transaction + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} id Id 32 character hex string + * @apiParam {Date} date Date of the Transaction + * @apiParam {String} description Description of Transaction + * @apiParam {String} data Extra data field + * @apiParam {Object[]} splits Array of Transaction Splits. nativeAmounts must add up to 0. + * @apiParam {String} splits.accountId Id of Account + * @apiParam {Number} splits.amount Amount of split in Account currency + * @apiParam {Number} splits.nativeAmount Amount of split in Org currency + * + * @apiSuccess {String} id Id of the Transaction. + * @apiSuccess {String} orgId Id of the Org. + * @apiSuccess {String} userId Id of the User who created the Transaction. 
+ * @apiSuccess {Date} date Date of the Transaction + * @apiSuccess {Date} inserted Date Transaction was created + * @apiSuccess {Date} updated Date Transaction was updated + * @apiSuccess {String} description Description of Transaction + * @apiSuccess {String} data Extra data field + * @apiSuccess {Object[]} splits Array of Transaction Splits + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "orgId": "11111111111111111111111111111111", + * "userId": "11111111111111111111111111111111", + * "date": "2018-06-08T20:12:29.720Z", + * "inserted": "2018-06-08T20:12:29.720Z", + * "updated": "2018-06-08T20:12:29.720Z", + * "description": "Treat friend to lunch", + * "data:": "{\"key\": \"value\"}", + * "splits": [ + * { + * "accountId": "11111111111111111111111111111111", + * "amount": -2000, + * "nativeAmount": -2000 + * }, + * { + * "accountId": "22222222222222222222222222222222", + * "amount": 1000, + * "nativeAmount": 1000 + * }, + * { + * "accountId": "33333333333333333333333333333333", + * "amount": 1000, + * "nativeAmount": 1000 + * } + * ] + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PostTransaction(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + + sTx := types.Transaction{} + err := r.DecodeJsonPayload(&sTx) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + sTx.OrgId = orgId + sTx.UserId = user.Id + + err = model.Instance.CreateTransaction(&sTx) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(sTx) +} + +/** + * @api {put} /orgs/:orgId/transactions/:transactionId Modify a Transaction + * @apiVersion 1.0.0 + * @apiName PutTransaction + * @apiGroup Transaction + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} id 32 character hex string + * @apiParam {Date} date Date of the Transaction + * @apiParam {String} description Description of Transaction + * @apiParam {String} data Extra data field + * @apiParam {Object[]} splits Array of Transaction Splits. nativeAmounts must add up to 0. + * @apiParam {String} splits.accountId Id of Account + * @apiParam {Number} splits.amount Amount of split in Account currency + * @apiParam {Number} splits.nativeAmount Amount of split in Org currency + * + * @apiSuccess {String} id Id of the Transaction. + * @apiSuccess {String} orgId Id of the Org. + * @apiSuccess {String} userId Id of the User who created the Transaction. 
+ * @apiSuccess {Date} date Date of the Transaction + * @apiSuccess {Date} inserted Date Transaction was created + * @apiSuccess {Date} updated Date Transaction was updated + * @apiSuccess {String} description Description of Transaction + * @apiSuccess {String} data Extra data field + * @apiSuccess {Object[]} splits Array of Transaction Splits + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "orgId": "11111111111111111111111111111111", + * "userId": "11111111111111111111111111111111", + * "date": "2018-06-08T20:12:29.720Z", + * "inserted": "2018-06-08T20:12:29.720Z", + * "updated": "2018-06-08T20:12:29.720Z", + * "description": "Treat friend to lunch", + * "data:": "{\"key\": \"value\"}", + * "splits": [ + * { + * "accountId": "11111111111111111111111111111111", + * "amount": -2000, + * "nativeAmount": -2000 + * }, + * { + * "accountId": "22222222222222222222222222222222", + * "amount": 1000, + * "nativeAmount": 1000 + * }, + * { + * "accountId": "33333333333333333333333333333333", + * "amount": 1000, + * "nativeAmount": 1000 + * } + * ] + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func PutTransaction(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + transactionId := r.PathParam("transactionId") + + sTx := types.Transaction{} + err := r.DecodeJsonPayload(&sTx) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + sTx.OrgId = orgId + sTx.UserId = user.Id + + err = model.Instance.UpdateTransaction(transactionId, &sTx) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(sTx) +} + +/** + * @api {delete} /orgs/:orgId/transactions/:transactionId Delete a Transaction + * @apiVersion 1.0.0 + * @apiName DeleteTransaction + * @apiGroup Transaction + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func DeleteTransaction(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + orgId := r.PathParam("orgId") + transactionId := r.PathParam("transactionId") + + err := model.Instance.DeleteTransaction(transactionId, user.Id, orgId) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} diff --git a/core/api/user.go b/core/api/user.go new file mode 100644 index 0000000..bc35a73 --- /dev/null +++ b/core/api/user.go @@ -0,0 +1,265 @@ +package api + +import ( + "github.com/ant0ine/go-json-rest/rest" + "github.com/openaccounting/oa-server/core/model" + "github.com/openaccounting/oa-server/core/model/types" + "net/http" +) + +type VerifyUserParams struct { + Code string `json:"code"` +} + +type ConfirmResetPasswordParams struct { + Code string `json:"code"` + Password string `json:"password"` +} + +type ResetPasswordParams struct { + Email string `json:"email"` +} + +/** + * @api {get} /user Get Authenticated User + * @apiVersion 1.0.0 + * @apiName GetUser + * @apiGroup User + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiSuccess {String} id Id of the User. 
+ * @apiSuccess {Date} inserted Date User was created + * @apiSuccess {Date} updated Date User was updated + * @apiSuccess {String} firstName First name of the User. + * @apiSuccess {String} lastName Last name of the User. + * @apiSuccess {String} email Email of the User. + * @apiSuccess {Boolean} agreeToTerms Agree to terms + * @apiSuccess {Boolean} emailVerified True if email has been verified. + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "firstName": "John", + * "lastName": "Doe", + * "email": "johndoe@email.com", + * "agreeToTerms": true, + * "emailVerified": true + * } + * + * @apiUse NotAuthorizedError + * @apiUse InternalServerError + */ +func GetUser(w rest.ResponseWriter, r *rest.Request) { + user := r.Env["USER"].(*types.User) + + w.WriteJson(&user) +} + +/** + * @api {post} /users Create a new User + * @apiVersion 1.0.0 + * @apiName PostUser + * @apiGroup User + * + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} id 32 character hex string + * @apiParam {String} firstName First name of the User. + * @apiParam {String} lastName Last name of the User. + * @apiParam {String} email Email of the User. + * @apiParam {String} password Password of the User. + * @apiParam {Boolean} agreeToTerms True if you agree to terms + * + * @apiSuccess {String} id Id of the User. + * @apiSuccess {Date} inserted Date User was created + * @apiSuccess {Date} updated Date User was updated + * @apiSuccess {String} firstName First name of the User. + * @apiSuccess {String} lastName Last name of the User. + * @apiSuccess {String} email Email of the User. + * @apiSuccess {Boolean} emailVerified True if email has been verified. + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "firstName": "John", + * "lastName": "Doe", + * "email": "johndoe@email.com", + * "agreeToTerms": true, + * "emailVerified": true + * } + * + * @apiUse InternalServerError + */ +func PostUser(w rest.ResponseWriter, r *rest.Request) { + user := &types.User{} + err := r.DecodeJsonPayload(user) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + err = model.Instance.CreateUser(user) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(user) +} + +/** + * @api {put} /user Modify User + * @apiVersion 1.0.0 + * @apiName PutUser + * @apiGroup User + * + * @apiHeader {String} Authorization HTTP Basic Auth + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} password New password + * @apiParam {String} code Password reset code. (Instead of Authorization header) + * + * @apiSuccess {String} id Id of the User. + * @apiSuccess {Date} inserted Date User was created + * @apiSuccess {Date} updated Date User was updated + * @apiSuccess {String} firstName First name of the User. + * @apiSuccess {String} lastName Last name of the User. + * @apiSuccess {String} email Email of the User. + * @apiSuccess {Boolean} emailVerified True if email has been verified. 
+ * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * { + * "id": "11111111111111111111111111111111", + * "inserted": "2018-09-11T18:05:04.420Z", + * "updated": "2018-09-11T18:05:04.420Z", + * "firstName": "John", + * "lastName": "Doe", + * "email": "johndoe@email.com", + * "agreeToTerms": true, + * "emailVerified": true + * } + * + * @apiUse InternalServerError + */ +func PutUser(w rest.ResponseWriter, r *rest.Request) { + if r.Env["USER"] == nil { + // password reset + params := &ConfirmResetPasswordParams{} + err := r.DecodeJsonPayload(params) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + user, err := model.Instance.ConfirmResetPassword(params.Password, params.Code) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(user) + return + } + + // Otherwise it's an authenticated PUT + + user := r.Env["USER"].(*types.User) + + newUser := &types.User{} + err := r.DecodeJsonPayload(newUser) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + user.Password = newUser.Password + + err = model.Instance.UpdateUser(user) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteJson(user) +} + +/** + * @api {post} /user/verify Verify user email address + * @apiVersion 1.0.0 + * @apiName VerifyUser + * @apiGroup User + * + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} code Email verification code + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * + * @apiUse InternalServerError + */ +func VerifyUser(w rest.ResponseWriter, r *rest.Request) { + params := &VerifyUserParams{} + + err := r.DecodeJsonPayload(params) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + err = model.Instance.VerifyUser(params.Code) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} + +/** + * @api {post} /user/reset-password Send reset password email + * @apiVersion 1.0.0 + * @apiName ResetPassword + * @apiGroup User + * + * @apiHeader {String} Accept-Version ^1.0.0 semver versioning + * + * @apiParam {String} email Email address for user + * + * @apiSuccessExample Success-Response: + * HTTP/1.1 200 OK + * + * @apiUse InternalServerError + */ +func ResetPassword(w rest.ResponseWriter, r *rest.Request) { + params := &ResetPasswordParams{} + + err := r.DecodeJsonPayload(params) + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + err = model.Instance.ResetPassword(params.Email) + + if err != nil { + rest.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) + return +} diff --git a/core/api/version.go b/core/api/version.go new file mode 100644 index 0000000..39eb10c --- /dev/null +++ b/core/api/version.go @@ -0,0 +1,48 @@ +package api + +import ( + "github.com/Masterminds/semver" + "github.com/ant0ine/go-json-rest/rest" + "net/http" +) + +type VersionMiddleware struct { +} + +// MiddlewareFunc makes AuthMiddleware implement the Middleware interface. 
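// An illustration of the semver matching performed below, using the same
// Masterminds/semver calls this file imports: a client constraint such as
// "^1.0.0" accepts the 1.0.0 server version, while something like "~0.1.0"
// would only be satisfied by the 0.1.8 compatibility version.
//
//	c, _ := semver.NewConstraint("^1.0.0") // taken from the Accept-Version header
//	v, _ := semver.NewVersion("1.0.0")     // the server version
//	c.Check(v)                             // true, so the request is handled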
+func (mw *VersionMiddleware) MiddlewareFunc(handler rest.HandlerFunc) rest.HandlerFunc {
+	return func(writer rest.ResponseWriter, request *rest.Request) {
+		version := request.Header.Get("Accept-Version")
+
+		// Don't require version header for websockets
+		if request.URL.String() == "/ws" {
+			handler(writer, request)
+			return
+		}
+
+		if version == "" {
+			rest.Error(writer, "Accept-Version header required", http.StatusBadRequest)
+			return
+		}
+
+		constraint, err := semver.NewConstraint(version)
+
+		if err != nil {
+			rest.Error(writer, "Invalid version", http.StatusBadRequest)
+			return
+		}
+
+		serverVersion, _ := semver.NewVersion("1.0.0")
+		// Pre-release versions
+		compatVersion, _ := semver.NewVersion("0.1.8")
+
+		versionMatch := constraint.Check(serverVersion)
+		compatMatch := constraint.Check(compatVersion)
+
+		if versionMatch == false && compatMatch == false {
+			rest.Error(writer, "Invalid version", http.StatusBadRequest)
+			return
+		}
+
+		handler(writer, request)
+	}
+}
diff --git a/core/auth/auth.go b/core/auth/auth.go
new file mode 100644
index 0000000..9a16423
--- /dev/null
+++ b/core/auth/auth.go
@@ -0,0 +1,91 @@
+package auth
+
+import (
+	"errors"
+	"github.com/openaccounting/oa-server/core/model/db"
+	"github.com/openaccounting/oa-server/core/model/types"
+	"github.com/openaccounting/oa-server/core/util"
+)
+
+var Instance Interface
+
+type AuthService struct {
+	db     db.Datastore
+	bcrypt util.Bcrypt
+}
+
+type Interface interface {
+	Authenticate(string, string) (*types.User, error)
+	AuthenticateUser(email string, password string) (*types.User, error)
+	AuthenticateSession(string) (*types.User, error)
+	AuthenticateApiKey(string) (*types.User, error)
+}
+
+func NewAuthService(db db.Datastore, bcrypt util.Bcrypt) *AuthService {
+	authService := &AuthService{db: db, bcrypt: bcrypt}
+	Instance = authService
+	return authService
+}
+
+func (auth *AuthService) Authenticate(emailOrKey string, password string) (*types.User, error) {
+	// authenticate via session, apikey or user
+	user, err := auth.AuthenticateSession(emailOrKey)
+
+	if err == nil {
+		return user, nil
+	}
+
+	user, err = auth.AuthenticateApiKey(emailOrKey)
+
+	if err == nil {
+		return user, nil
+	}
+
+	user, err = auth.AuthenticateUser(emailOrKey, password)
+
+	if err == nil {
+		return user, nil
+	}
+
+	return nil, errors.New("Unauthorized")
+}
+
+func (auth *AuthService) AuthenticateUser(email string, password string) (*types.User, error) {
+	u, err := auth.db.GetVerifiedUserByEmail(email)
+
+	if err != nil {
+		return nil, errors.New("Invalid email or password")
+	}
+
+	err = auth.bcrypt.CompareHashAndPassword([]byte(u.PasswordHash), []byte(password))
+
+	if err != nil {
+		return nil, errors.New("Invalid email or password")
+	}
+
+	return u, nil
+}
+
+func (auth *AuthService) AuthenticateSession(id string) (*types.User, error) {
+	u, err := auth.db.GetUserByActiveSession(id)
+
+	if err != nil {
+		return nil, errors.New("Invalid session")
+	}
+
+	auth.db.UpdateSessionActivity(id)
+
+	return u, nil
+}
+
+func (auth *AuthService) AuthenticateApiKey(id string) (*types.User, error) {
+	u, err := auth.db.GetUserByApiKey(id)
+
+	if err != nil {
+		return nil, errors.New("Access denied")
+	}
+
+	auth.db.UpdateApiKeyActivity(id)
+
+	return u, nil
+}
diff --git a/core/auth/auth_test.go b/core/auth/auth_test.go
new file mode 100644
index 0000000..3cb96df
--- /dev/null
+++ b/core/auth/auth_test.go
@@ -0,0 +1,90 @@
+package auth
+
+import (
+	"errors"
+	"github.com/openaccounting/oa-server/core/model/db"
+
"github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "github.com/stretchr/testify/assert" + "testing" + "time" +) + +type TdUser struct { + db.Datastore + testNum int +} + +func (td *TdUser) GetVerifiedUserByEmail(email string) (*types.User, error) { + switch td.testNum { + case 1: + return td.GetVerifiedUserByEmail_1(email) + case 2: + return td.GetVerifiedUserByEmail_2(email) + } + + return nil, errors.New("test error") +} + +func (td *TdUser) GetVerifiedUserByEmail_1(email string) (*types.User, error) { + return &types.User{ + "1", + time.Unix(0, 0), + time.Unix(0, 0), + "John", + "Doe", + "johndoe@email.com", + "password", + "$2a$10$KrtvADe7jwrmYIe3GXFbNupOQaPIvyOKeng5826g4VGOD47TpAisG", + true, + "", + false, + "", + }, nil +} + +func (td *TdUser) GetVerifiedUserByEmail_2(email string) (*types.User, error) { + return nil, errors.New("sql error") +} + +func TestAuthenticateUser(t *testing.T) { + tests := map[string]struct { + err error + email string + password string + saltedHash string + testNum int + }{ + "successful": { + err: nil, + email: "johndoe@email.com", + password: "password", + saltedHash: "$2a$10$KrtvADe7jwrmYIe3GXFbNupOQaPIvyOKeng5826g4VGOD47TpAisG", + testNum: 1, + }, + "non-existing user": { + err: errors.New("Invalid email or password"), + email: "nouser@email.com", + password: "password", + saltedHash: "", + testNum: 2, + }, + "wrong password": { + err: errors.New("Invalid email or password"), + email: "johndoe@email.com", + password: "bad", + saltedHash: "$2a$10$KrtvADe7jwrmYIe3GXFbNupOQaPIvyOKeng5826g4VGOD47TpAisG", + testNum: 1, + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + authService := NewAuthService(&TdUser{testNum: test.testNum}, new(util.StandardBcrypt)) + + _, err := authService.AuthenticateUser(test.email, test.password) + + assert.Equal(t, err, test.err) + } +} diff --git a/core/mocks/Bcrypt.go b/core/mocks/Bcrypt.go new file mode 100644 index 0000000..a4a4a7e --- /dev/null +++ b/core/mocks/Bcrypt.go @@ -0,0 +1,60 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+package mocks + +import mock "github.com/stretchr/testify/mock" + +// Bcrypt is an autogenerated mock type for the Bcrypt type +type Bcrypt struct { + mock.Mock +} + +// CompareHashAndPassword provides a mock function with given fields: _a0, _a1 +func (_m *Bcrypt) CompareHashAndPassword(_a0 []byte, _a1 []byte) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func([]byte, []byte) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GenerateFromPassword provides a mock function with given fields: _a0, _a1 +func (_m *Bcrypt) GenerateFromPassword(_a0 []byte, _a1 int) ([]byte, error) { + ret := _m.Called(_a0, _a1) + + var r0 []byte + if rf, ok := ret.Get(0).(func([]byte, int) []byte); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte, int) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetDefaultCost provides a mock function with given fields: +func (_m *Bcrypt) GetDefaultCost() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} diff --git a/core/mocks/Datastore.go b/core/mocks/Datastore.go new file mode 100644 index 0000000..ac1c132 --- /dev/null +++ b/core/mocks/Datastore.go @@ -0,0 +1,970 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. +package mocks + +import mock "github.com/stretchr/testify/mock" +import time "time" +import types "github.com/openaccounting/oa-server/core/model/types" + +// Datastore is an autogenerated mock type for the Datastore type +type Datastore struct { + mock.Mock +} + +// AcceptInvite provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) AcceptInvite(_a0 *types.Invite, _a1 string) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Invite, string) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddBalance provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) AddBalance(_a0 *types.Account, _a1 time.Time) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Account, time.Time) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddBalances provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) AddBalances(_a0 []*types.Account, _a1 time.Time) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func([]*types.Account, time.Time) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddNativeBalanceCost provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) AddNativeBalanceCost(_a0 *types.Account, _a1 time.Time) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Account, time.Time) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddNativeBalanceNearestInTime provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) AddNativeBalanceNearestInTime(_a0 *types.Account, _a1 time.Time) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Account, time.Time) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddNativeBalancesCost provides a mock function with given 
fields: _a0, _a1 +func (_m *Datastore) AddNativeBalancesCost(_a0 []*types.Account, _a1 time.Time) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func([]*types.Account, time.Time) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddNativeBalancesNearestInTime provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) AddNativeBalancesNearestInTime(_a0 []*types.Account, _a1 time.Time) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func([]*types.Account, time.Time) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CreateOrg provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Datastore) CreateOrg(_a0 *types.Org, _a1 string, _a2 []*types.Account) error { + ret := _m.Called(_a0, _a1, _a2) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Org, string, []*types.Account) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteAccount provides a mock function with given fields: id +func (_m *Datastore) DeleteAccount(id string) error { + ret := _m.Called(id) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteAndInsertTransaction provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) DeleteAndInsertTransaction(_a0 string, _a1 *types.Transaction) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(string, *types.Transaction) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteApiKey provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) DeleteApiKey(_a0 string, _a1 string) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteInvite provides a mock function with given fields: _a0 +func (_m *Datastore) DeleteInvite(_a0 string) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeletePrice provides a mock function with given fields: _a0 +func (_m *Datastore) DeletePrice(_a0 string) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteSession provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) DeleteSession(_a0 string, _a1 string) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteTransaction provides a mock function with given fields: _a0 +func (_m *Datastore) DeleteTransaction(_a0 string) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Escape provides a mock function with given fields: _a0 +func (_m *Datastore) Escape(_a0 string) string { + ret := _m.Called(_a0) + + var r0 string + if rf, ok := ret.Get(0).(func(string) string); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// GetAccount provides a mock function with given fields: _a0 +func (_m 
*Datastore) GetAccount(_a0 string) (*types.Account, error) { + ret := _m.Called(_a0) + + var r0 *types.Account + if rf, ok := ret.Get(0).(func(string) *types.Account); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Account) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetAccountsByOrgId provides a mock function with given fields: orgId +func (_m *Datastore) GetAccountsByOrgId(orgId string) ([]*types.Account, error) { + ret := _m.Called(orgId) + + var r0 []*types.Account + if rf, ok := ret.Get(0).(func(string) []*types.Account); ok { + r0 = rf(orgId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Account) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(orgId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetApiKeys provides a mock function with given fields: _a0 +func (_m *Datastore) GetApiKeys(_a0 string) ([]*types.ApiKey, error) { + ret := _m.Called(_a0) + + var r0 []*types.ApiKey + if rf, ok := ret.Get(0).(func(string) []*types.ApiKey); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.ApiKey) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetChildCountByAccountId provides a mock function with given fields: id +func (_m *Datastore) GetChildCountByAccountId(id string) (int64, error) { + ret := _m.Called(id) + + var r0 int64 + if rf, ok := ret.Get(0).(func(string) int64); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(int64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetInvite provides a mock function with given fields: _a0 +func (_m *Datastore) GetInvite(_a0 string) (*types.Invite, error) { + ret := _m.Called(_a0) + + var r0 *types.Invite + if rf, ok := ret.Get(0).(func(string) *types.Invite); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Invite) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetInvites provides a mock function with given fields: _a0 +func (_m *Datastore) GetInvites(_a0 string) ([]*types.Invite, error) { + ret := _m.Called(_a0) + + var r0 []*types.Invite + if rf, ok := ret.Get(0).(func(string) []*types.Invite); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Invite) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetOrg provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) GetOrg(_a0 string, _a1 string) (*types.Org, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.Org + if rf, ok := ret.Get(0).(func(string, string) *types.Org); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Org) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetOrgAdmins provides a mock function with given fields: _a0 +func (_m *Datastore) GetOrgAdmins(_a0 string) ([]*types.User, error) { + ret := _m.Called(_a0) + + var r0 []*types.User + if rf, ok := ret.Get(0).(func(string) 
[]*types.User); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.User) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetOrgUserIds provides a mock function with given fields: _a0 +func (_m *Datastore) GetOrgUserIds(_a0 string) ([]string, error) { + ret := _m.Called(_a0) + + var r0 []string + if rf, ok := ret.Get(0).(func(string) []string); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetOrgs provides a mock function with given fields: _a0 +func (_m *Datastore) GetOrgs(_a0 string) ([]*types.Org, error) { + ret := _m.Called(_a0) + + var r0 []*types.Org + if rf, ok := ret.Get(0).(func(string) []*types.Org); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Org) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPermissionedAccountIds provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Datastore) GetPermissionedAccountIds(_a0 string, _a1 string, _a2 string) ([]string, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 []string + if rf, ok := ret.Get(0).(func(string, string, string) []string); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string, string) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPriceById provides a mock function with given fields: _a0 +func (_m *Datastore) GetPriceById(_a0 string) (*types.Price, error) { + ret := _m.Called(_a0) + + var r0 *types.Price + if rf, ok := ret.Get(0).(func(string) *types.Price); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Price) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPricesByCurrency provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) GetPricesByCurrency(_a0 string, _a1 string) ([]*types.Price, error) { + ret := _m.Called(_a0, _a1) + + var r0 []*types.Price + if rf, ok := ret.Get(0).(func(string, string) []*types.Price); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Price) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetPricesNearestInTime provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) GetPricesNearestInTime(_a0 string, _a1 time.Time) ([]*types.Price, error) { + ret := _m.Called(_a0, _a1) + + var r0 []*types.Price + if rf, ok := ret.Get(0).(func(string, time.Time) []*types.Price); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Price) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, time.Time) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRootAccount provides a mock function with given fields: _a0 +func (_m *Datastore) GetRootAccount(_a0 string) (*types.Account, error) { + ret := _m.Called(_a0) + + var r0 
*types.Account + if rf, ok := ret.Get(0).(func(string) *types.Account); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Account) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSplitCountByAccountId provides a mock function with given fields: id +func (_m *Datastore) GetSplitCountByAccountId(id string) (int64, error) { + ret := _m.Called(id) + + var r0 int64 + if rf, ok := ret.Get(0).(func(string) int64); ok { + r0 = rf(id) + } else { + r0 = ret.Get(0).(int64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionById provides a mock function with given fields: _a0 +func (_m *Datastore) GetTransactionById(_a0 string) (*types.Transaction, error) { + ret := _m.Called(_a0) + + var r0 *types.Transaction + if rf, ok := ret.Get(0).(func(string) *types.Transaction); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Transaction) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionsByAccount provides a mock function with given fields: _a0, _a1 +func (_m *Datastore) GetTransactionsByAccount(_a0 string, _a1 *types.QueryOptions) ([]*types.Transaction, error) { + ret := _m.Called(_a0, _a1) + + var r0 []*types.Transaction + if rf, ok := ret.Get(0).(func(string, *types.QueryOptions) []*types.Transaction); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Transaction) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, *types.QueryOptions) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetTransactionsByOrg provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Datastore) GetTransactionsByOrg(_a0 string, _a1 *types.QueryOptions, _a2 []string) ([]*types.Transaction, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 []*types.Transaction + if rf, ok := ret.Get(0).(func(string, *types.QueryOptions, []string) []*types.Transaction); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*types.Transaction) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, *types.QueryOptions, []string) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetUserByActiveSession provides a mock function with given fields: _a0 +func (_m *Datastore) GetUserByActiveSession(_a0 string) (*types.User, error) { + ret := _m.Called(_a0) + + var r0 *types.User + if rf, ok := ret.Get(0).(func(string) *types.User); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.User) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetUserByApiKey provides a mock function with given fields: _a0 +func (_m *Datastore) GetUserByApiKey(_a0 string) (*types.User, error) { + ret := _m.Called(_a0) + + var r0 *types.User + if rf, ok := ret.Get(0).(func(string) *types.User); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.User) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// 
GetUserByResetCode provides a mock function with given fields: _a0 +func (_m *Datastore) GetUserByResetCode(_a0 string) (*types.User, error) { + ret := _m.Called(_a0) + + var r0 *types.User + if rf, ok := ret.Get(0).(func(string) *types.User); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.User) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetVerifiedUserByEmail provides a mock function with given fields: _a0 +func (_m *Datastore) GetVerifiedUserByEmail(_a0 string) (*types.User, error) { + ret := _m.Called(_a0) + + var r0 *types.User + if rf, ok := ret.Get(0).(func(string) *types.User); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.User) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InsertAccount provides a mock function with given fields: account +func (_m *Datastore) InsertAccount(account *types.Account) error { + ret := _m.Called(account) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Account) error); ok { + r0 = rf(account) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertApiKey provides a mock function with given fields: _a0 +func (_m *Datastore) InsertApiKey(_a0 *types.ApiKey) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.ApiKey) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertInvite provides a mock function with given fields: _a0 +func (_m *Datastore) InsertInvite(_a0 *types.Invite) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Invite) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertPrice provides a mock function with given fields: _a0 +func (_m *Datastore) InsertPrice(_a0 *types.Price) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Price) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertSession provides a mock function with given fields: _a0 +func (_m *Datastore) InsertSession(_a0 *types.Session) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Session) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertTransaction provides a mock function with given fields: _a0 +func (_m *Datastore) InsertTransaction(_a0 *types.Transaction) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Transaction) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InsertUser provides a mock function with given fields: _a0 +func (_m *Datastore) InsertUser(_a0 *types.User) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.User) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateAccount provides a mock function with given fields: account +func (_m *Datastore) UpdateAccount(account *types.Account) error { + ret := _m.Called(account) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Account) error); ok { + r0 = rf(account) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateApiKey provides a mock function with given fields: _a0 +func (_m *Datastore) UpdateApiKey(_a0 *types.ApiKey) error { + ret := 
_m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.ApiKey) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateApiKeyActivity provides a mock function with given fields: _a0 +func (_m *Datastore) UpdateApiKeyActivity(_a0 string) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateOrg provides a mock function with given fields: _a0 +func (_m *Datastore) UpdateOrg(_a0 *types.Org) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.Org) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateSessionActivity provides a mock function with given fields: _a0 +func (_m *Datastore) UpdateSessionActivity(_a0 string) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateUser provides a mock function with given fields: _a0 +func (_m *Datastore) UpdateUser(_a0 *types.User) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.User) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UpdateUserResetPassword provides a mock function with given fields: _a0 +func (_m *Datastore) UpdateUserResetPassword(_a0 *types.User) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*types.User) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// VerifyUser provides a mock function with given fields: _a0 +func (_m *Datastore) VerifyUser(_a0 string) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/core/model/account.go b/core/model/account.go new file mode 100644 index 0000000..67f0e49 --- /dev/null +++ b/core/model/account.go @@ -0,0 +1,377 @@ +package model + +import ( + "errors" + "fmt" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/ws" + "sort" + "time" +) + +type AccountInterface interface { + CreateAccount(account *types.Account, userId string) error + UpdateAccount(account *types.Account, userId string) error + DeleteAccount(id string, userId string, orgId string) error + GetAccounts(orgId string, userId string, tokenId string) ([]*types.Account, error) + GetAccountsWithBalances(orgId string, userId string, tokenId string, date time.Time) ([]*types.Account, error) +} + +type ByName []*types.Account + +func (a ByName) Len() int { return len(a) } +func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByName) Less(i, j int) bool { return a[i].Name < a[j].Name } + +func (model *Model) CreateAccount(account *types.Account, userId string) (err error) { + if account.Id == "" { + return errors.New("id required") + } + + if account.OrgId == "" { + return errors.New("orgId required") + } + + if account.Name == "" { + return errors.New("name required") + } + + if account.Currency == "" { + return errors.New("currency required") + } + + userAccounts, err := model.GetAccounts(account.OrgId, userId, "") + + if err != nil { + return + } + + if !model.accountsContainWriteAccess(userAccounts, account.Parent) { + return errors.New(fmt.Sprintf("%s %s", "user does not have permission to access account", account.Parent)) + } + 
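	// Validation and the parent-account permission check have passed, so the
	// account is persisted and then pushed to the org's websocket subscribers.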
+ err = model.db.InsertAccount(account) + + if err != nil { + return + } + + // Notify web socket subscribers + // TODO only get user ids that have permission to access account + userIds, err2 := model.db.GetOrgUserIds(account.OrgId) + + if err2 == nil { + ws.PushAccount(account, userIds, "create") + } + + return +} + +func (model *Model) UpdateAccount(account *types.Account, userId string) (err error) { + if account.Id == "" { + return errors.New("id required") + } + + if account.OrgId == "" { + return errors.New("orgId required") + } + + if account.Name == "" { + return errors.New("name required") + } + + if account.Currency == "" { + return errors.New("currency required") + } + + if account.Parent == account.Id { + return errors.New("account cannot be its own parent") + } + + userAccounts, err := model.GetAccounts(account.OrgId, userId, "") + + if err != nil { + return + } + + if !model.accountsContainWriteAccess(userAccounts, account.Parent) { + return errors.New(fmt.Sprintf("%s %s", "user does not have permission to access account", account.Parent)) + } + + err = model.db.UpdateAccount(account) + + if err != nil { + return + } + + err = model.db.AddBalance(account, time.Now()) + + if err != nil { + return + } + + err = model.db.AddNativeBalanceCost(account, time.Now()) + + if err != nil { + return + } + + // Notify web socket subscribers + // TODO only get user ids that have permission to access account + userIds, err2 := model.db.GetOrgUserIds(account.OrgId) + + if err2 == nil { + ws.PushAccount(account, userIds, "update") + } + + return +} + +func (model *Model) DeleteAccount(id string, userId string, orgId string) (err error) { + // TODO make sure user is part of org + + // check to make sure user has permission + userAccounts, err := model.GetAccounts(orgId, userId, "") + + if err != nil { + return + } + + if !model.accountsContainWriteAccess(userAccounts, id) { + return errors.New(fmt.Sprintf("%s %s", "user does not have permission to access account", id)) + } + + // don't allow deleting of accounts that have transactions or child accounts + count, err := model.db.GetSplitCountByAccountId(id) + + if err != nil { + return + } + + if count != 0 { + return errors.New("Cannot delete an account that has transactions") + } + + count, err = model.db.GetChildCountByAccountId(id) + + if err != nil { + return + } + + if count != 0 { + return errors.New("Cannot delete an account that has children") + } + + account, err := model.db.GetAccount(id) + + if err != nil { + return + } + + err = model.db.DeleteAccount(id) + + if err != nil { + return + } + + // Notify web socket subscribers + // TODO only get user ids that have permission to access account + userIds, err2 := model.db.GetOrgUserIds(account.OrgId) + + if err2 == nil { + ws.PushAccount(account, userIds, "delete") + } + + return +} + +func (model *Model) getAccounts(orgId string, userId string, tokenId string, date time.Time, withBalances bool) ([]*types.Account, error) { + permissionedAccounts, err := model.db.GetPermissionedAccountIds(orgId, userId, "") + if err != nil { + return nil, err + } + + var allAccounts []*types.Account + + if withBalances == true { + allAccounts, err = model.getAllAccountsWithBalances(orgId, date) + } else { + allAccounts, err = model.getAllAccounts(orgId) + } + + if err != nil { + return nil, err + } + + accountMap := model.makeAccountMap(allAccounts) + writeAccessMap := make(map[string]*types.Account) + readAccessMap := make(map[string]*types.Account) + + for _, accountId := range permissionedAccounts { + 
writeAccessMap[accountId] = accountMap[accountId].Account + + // parents are read only + parents := model.getParents(accountId, accountMap) + + for _, parentAccount := range parents { + readAccessMap[parentAccount.Id] = parentAccount + } + + // top level accounts are initially read only unless user has permission + topLevelAccounts := model.getTopLevelAccounts(accountMap) + + for _, topLevelAccount := range topLevelAccounts { + readAccessMap[topLevelAccount.Id] = topLevelAccount + } + + // Children have write access + children := model.getChildren(accountId, accountMap) + + for _, childAccount := range children { + writeAccessMap[childAccount.Id] = childAccount + } + } + + filtered := make([]*types.Account, 0) + + for _, account := range writeAccessMap { + filtered = append(filtered, account) + } + + for id, account := range readAccessMap { + _, ok := writeAccessMap[id] + + if ok == false { + account.ReadOnly = true + filtered = append(filtered, account) + } + } + + // TODO sort by inserted + sort.Sort(ByName(filtered)) + + return filtered, nil +} + +func (model *Model) GetAccounts(orgId string, userId string, tokenId string) ([]*types.Account, error) { + return model.getAccounts(orgId, userId, tokenId, time.Time{}, false) +} + +func (model *Model) GetAccountsWithBalances(orgId string, userId string, tokenId string, date time.Time) ([]*types.Account, error) { + return model.getAccounts(orgId, userId, tokenId, date, true) +} + +func (model *Model) getAllAccounts(orgId string) ([]*types.Account, error) { + return model.db.GetAccountsByOrgId(orgId) +} + +func (model *Model) getAllAccountsWithBalances(orgId string, date time.Time) ([]*types.Account, error) { + accounts, err := model.db.GetAccountsByOrgId(orgId) + + if err != nil { + return nil, err + } + + err = model.db.AddBalances(accounts, date) + + if err != nil { + return nil, err + } + + err = model.db.AddNativeBalancesCost(accounts, date) + + if err != nil { + return nil, err + } + + return accounts, nil +} + +func (model *Model) makeAccountMap(accounts []*types.Account) map[string]*types.AccountNode { + m := make(map[string]*types.AccountNode) + + for _, account := range accounts { + m[account.Id] = &types.AccountNode{ + Account: account, + Parent: nil, + Children: nil, + } + } + + for _, account := range accounts { + m[account.Id].Parent = m[account.Parent] + + if value, ok := m[account.Parent]; ok { + value.Children = append(value.Children, m[account.Id]) + value.Account.HasChildren = true + } + } + + return m +} + +func (model *Model) getChildren(parentId string, accountMap map[string]*types.AccountNode) []*types.Account { + if _, ok := accountMap[parentId]; !ok { + return nil + } + + children := make([]*types.Account, 0) + + for _, childAccountNode := range accountMap[parentId].Children { + children = append(children, childAccountNode.Account) + grandChildren := model.getChildren(childAccountNode.Account.Id, accountMap) + children = append(children, grandChildren...) 
+ } + + return children +} + +func (model *Model) getParents(accountId string, accountMap map[string]*types.AccountNode) []*types.Account { + node, ok := accountMap[accountId] + + if !ok { + return nil + } + + if node.Parent == nil { + return make([]*types.Account, 0) + } + + parents := model.getParents(node.Parent.Account.Id, accountMap) + return append(parents, node.Parent.Account) +} + +func (model *Model) accountsContainWriteAccess(accounts []*types.Account, accountId string) bool { + for _, account := range accounts { + if account.Id == accountId && !account.ReadOnly { + return true + } + } + return false +} + +func (model *Model) getAccountFromList(accounts []*types.Account, accountId string) *types.Account { + for _, account := range accounts { + if account.Id == accountId { + return account + } + } + return nil +} + +func (model *Model) getTopLevelAccounts(accountMap map[string]*types.AccountNode) []*types.Account { + accounts := make([]*types.Account, 0) + + for _, node := range accountMap { + if node.Parent == nil { + accounts = append(accounts, node.Account) + + for _, child := range node.Children { + accounts = append(accounts, child.Account) + } + break + } + } + + return accounts +} diff --git a/core/model/account_test.go b/core/model/account_test.go new file mode 100644 index 0000000..32fb972 --- /dev/null +++ b/core/model/account_test.go @@ -0,0 +1,330 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/db" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "testing" + "time" +) + +type TdAccount struct { + db.Datastore + mock.Mock +} + +func (td *TdAccount) GetPermissionedAccountIds(userId string, orgId string, tokenId string) ([]string, error) { + // User has permission to only "Assets" account + return []string{"2"}, nil +} + +func (td *TdAccount) GetAccountsByOrgId(orgId string) ([]*types.Account, error) { + args := td.Called(orgId) + return args.Get(0).([]*types.Account), args.Error(1) +} + +func (td *TdAccount) InsertAccount(account *types.Account) error { + return nil +} + +func (td *TdAccount) UpdateAccount(account *types.Account) error { + return nil +} + +func (td *TdAccount) AddBalance(account *types.Account, date time.Time) error { + return nil +} + +func (td *TdAccount) AddNativeBalanceNearestInTime(account *types.Account, date time.Time) error { + return nil +} + +func (td *TdAccount) AddNativeBalanceCost(account *types.Account, date time.Time) error { + return nil +} + +func (td *TdAccount) AddBalances(accounts []*types.Account, date time.Time) error { + balance := int64(1000) + for _, account := range accounts { + account.Balance = &balance + } + + return nil +} + +func (td *TdAccount) AddNativeBalancesNearestInTime(accounts []*types.Account, date time.Time) error { + balance := int64(1000) + for _, account := range accounts { + account.NativeBalance = &balance + } + + return nil +} + +func (td *TdAccount) AddNativeBalancesCost(accounts []*types.Account, date time.Time) error { + balance := int64(1000) + for _, account := range accounts { + account.NativeBalance = &balance + } + + return nil +} + +func (td *TdAccount) GetSplitCountByAccountId(id string) (int64, error) { + args := td.Called(id) + return args.Get(0).(int64), args.Error(1) +} + +func (td *TdAccount) GetChildCountByAccountId(id string) (int64, error) { + args := td.Called(id) + return args.Get(0).(int64), args.Error(1) +} + +func (td *TdAccount) DeleteAccount(id string) 
error { + return nil +} + +func (td *TdAccount) GetOrgUserIds(id string) ([]string, error) { + return []string{"1"}, nil +} + +func (td *TdAccount) GetAccount(id string) (*types.Account, error) { + return &types.Account{}, nil +} + +func getTestAccounts() []*types.Account { + return []*types.Account{ + &types.Account{ + Id: "2", + OrgId: "1", + Name: "Assets", + Parent: "1", + Currency: "USD", + Precision: 2, + DebitBalance: true, + }, + &types.Account{ + Id: "3", + OrgId: "1", + Name: "Current Assets", + Parent: "2", + Currency: "USD", + Precision: 2, + DebitBalance: true, + }, + &types.Account{ + Id: "1", + OrgId: "1", + Name: "Root", + Parent: "", + Currency: "USD", + Precision: 2, + DebitBalance: true, + }, + } +} + +func TestCreateAccount(t *testing.T) { + tests := map[string]struct { + err error + account *types.Account + }{ + "success": { + err: nil, + account: &types.Account{ + Id: "1", + OrgId: "1", + Name: "Cash", + Parent: "3", + Currency: "USD", + Precision: 2, + DebitBalance: true, + }, + }, + "permission error": { + err: errors.New("user does not have permission to access account 1"), + account: &types.Account{ + Id: "1", + OrgId: "1", + Name: "Cash", + Parent: "1", + Currency: "USD", + Precision: 2, + DebitBalance: true, + }, + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + td := &TdAccount{} + td.On("GetAccountsByOrgId", "1").Return(getTestAccounts(), nil) + + model := NewModel(td, nil, types.Config{}) + + err := model.CreateAccount(test.account, "1") + assert.Equal(t, test.err, err) + } +} + +func TestUpdateAccount(t *testing.T) { + tests := map[string]struct { + err error + account *types.Account + }{ + "success": { + err: nil, + account: &types.Account{ + Id: "3", + OrgId: "1", + Name: "Current Assets2", + Parent: "2", + Currency: "USD", + Precision: 2, + DebitBalance: true, + }, + }, + "error": { + err: errors.New("account cannot be its own parent"), + account: &types.Account{ + Id: "3", + OrgId: "1", + Name: "Current Assets", + Parent: "3", + Currency: "USD", + Precision: 2, + DebitBalance: true, + }, + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + td := &TdAccount{} + td.On("GetAccountsByOrgId", "1").Return(getTestAccounts(), nil) + + model := NewModel(td, nil, types.Config{}) + + err := model.UpdateAccount(test.account, "1") + assert.Equal(t, test.err, err) + + if err == nil { + td.AssertExpectations(t) + } + } +} + +func TestDeleteAccount(t *testing.T) { + tests := map[string]struct { + err error + accountId string + count int64 + }{ + "success": { + err: nil, + accountId: "3", + count: 0, + }, + "error": { + err: errors.New("Cannot delete an account that has transactions"), + accountId: "3", + count: 1, + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + td := &TdAccount{} + td.On("GetAccountsByOrgId", "1").Return(getTestAccounts(), nil) + td.On("GetSplitCountByAccountId", test.accountId).Return(test.count, nil) + td.On("GetChildCountByAccountId", test.accountId).Return(test.count, nil) + + model := NewModel(td, nil, types.Config{}) + + err := model.DeleteAccount(test.accountId, "1", "1") + assert.Equal(t, test.err, err) + + if err == nil { + td.AssertExpectations(t) + } + } +} + +func TestGetAccounts(t *testing.T) { + tests := map[string]struct { + err error + }{ + "success": { + err: nil, + }, + // "error": { + // err: errors.New("db error"), + // }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + td := 
&TdAccount{} + td.On("GetAccountsByOrgId", "1").Return(getTestAccounts(), test.err) + + model := NewModel(td, nil, types.Config{}) + + accounts, err := model.GetAccounts("1", "1", "") + + assert.Equal(t, test.err, err) + + if err == nil { + td.AssertExpectations(t) + assert.Equal(t, 3, len(accounts)) + assert.Equal(t, false, accounts[0].ReadOnly) + assert.Equal(t, false, accounts[1].ReadOnly) + assert.Equal(t, true, accounts[2].ReadOnly) + } + } +} + +func TestGetAccountsWithBalances(t *testing.T) { + tests := map[string]struct { + err error + }{ + "success": { + err: nil, + }, + "error": { + err: errors.New("db error"), + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + td := &TdAccount{} + td.On("GetAccountsByOrgId", "1").Return(getTestAccounts(), test.err) + + model := NewModel(td, nil, types.Config{}) + + accounts, err := model.GetAccountsWithBalances("1", "1", "", time.Now()) + + assert.Equal(t, test.err, err) + + if err == nil { + td.AssertExpectations(t) + assert.Equal(t, 3, len(accounts)) + assert.Equal(t, false, accounts[0].ReadOnly) + assert.Equal(t, false, accounts[1].ReadOnly) + assert.Equal(t, true, accounts[2].ReadOnly) + + assert.Equal(t, int64(1000), *accounts[0].Balance) + assert.Equal(t, int64(1000), *accounts[1].Balance) + + assert.Equal(t, int64(1000), *accounts[0].NativeBalance) + assert.Equal(t, int64(1000), *accounts[1].NativeBalance) + } + } +} diff --git a/core/model/apikey.go b/core/model/apikey.go new file mode 100644 index 0000000..9f75c64 --- /dev/null +++ b/core/model/apikey.go @@ -0,0 +1,37 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/types" +) + +type ApiKeyInterface interface { + CreateApiKey(*types.ApiKey) error + UpdateApiKey(*types.ApiKey) error + DeleteApiKey(string, string) error + GetApiKeys(string) ([]*types.ApiKey, error) +} + +func (model *Model) CreateApiKey(key *types.ApiKey) error { + if key.Id == "" { + return errors.New("id required") + } + + return model.db.InsertApiKey(key) +} + +func (model *Model) UpdateApiKey(key *types.ApiKey) error { + if key.Id == "" { + return errors.New("id required") + } + + return model.db.UpdateApiKey(key) +} + +func (model *Model) DeleteApiKey(id string, userId string) error { + return model.db.DeleteApiKey(id, userId) +} + +func (model *Model) GetApiKeys(userId string) ([]*types.ApiKey, error) { + return model.db.GetApiKeys(userId) +} diff --git a/core/model/db/account.go b/core/model/db/account.go new file mode 100644 index 0000000..6bc4e4b --- /dev/null +++ b/core/model/db/account.go @@ -0,0 +1,391 @@ +package db + +import ( + "database/sql" + "errors" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "math" + "strings" + "time" +) + +const emptyAccountId = "00000000000000000000000000000000" + +type AccountInterface interface { + InsertAccount(account *types.Account) error + UpdateAccount(account *types.Account) error + GetAccount(string) (*types.Account, error) + GetAccountsByOrgId(orgId string) ([]*types.Account, error) + GetPermissionedAccountIds(string, string, string) ([]string, error) + GetSplitCountByAccountId(id string) (int64, error) + GetChildCountByAccountId(id string) (int64, error) + DeleteAccount(id string) error + AddBalances([]*types.Account, time.Time) error + AddNativeBalancesCost([]*types.Account, time.Time) error + AddNativeBalancesNearestInTime([]*types.Account, time.Time) error + AddBalance(*types.Account, time.Time) error + 
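	// The "native" variants below express an account balance in the org's base
	// currency: the Cost versions sum the stored nativeAmount of each split,
	// while the NearestInTime versions convert the balance using the price
	// record closest to the requested date.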
AddNativeBalanceCost(*types.Account, time.Time) error + AddNativeBalanceNearestInTime(*types.Account, time.Time) error + GetRootAccount(string) (*types.Account, error) +} + +func (db *DB) InsertAccount(account *types.Account) error { + account.Inserted = time.Now() + account.Updated = account.Inserted + + query := "INSERT INTO account(id,orgId,inserted,updated,name,parent,currency,`precision`,debitBalance) VALUES(UNHEX(?),UNHEX(?),?,?,?,UNHEX(?),?,?,?)" + _, err := db.Exec( + query, + account.Id, + account.OrgId, + util.TimeToMs(account.Inserted), + util.TimeToMs(account.Updated), + account.Name, + account.Parent, + account.Currency, + account.Precision, + account.DebitBalance) + + return err +} + +func (db *DB) UpdateAccount(account *types.Account) error { + account.Updated = time.Now() + + query := "UPDATE account SET updated = ?, name = ?, parent = UNHEX(?), currency = ?, `precision` = ?, debitBalance = ? WHERE id = UNHEX(?)" + _, err := db.Exec( + query, + util.TimeToMs(account.Updated), + account.Name, + account.Parent, + account.Currency, + account.Precision, + account.DebitBalance, + account.Id) + + return err +} + +func (db *DB) GetAccount(id string) (*types.Account, error) { + a := types.Account{} + var inserted int64 + var updated int64 + + err := db.QueryRow("SELECT LOWER(HEX(id)),LOWER(HEX(orgId)),inserted,updated,name,LOWER(HEX(parent)),currency,`precision`,debitBalance FROM account WHERE id = UNHEX(?)", id). + Scan(&a.Id, &a.OrgId, &inserted, &updated, &a.Name, &a.Parent, &a.Currency, &a.Precision, &a.DebitBalance) + + if a.Parent == emptyAccountId { + a.Parent = "" + } + + switch { + case err == sql.ErrNoRows: + return nil, errors.New("Account not found") + case err != nil: + return nil, err + default: + a.Inserted = util.MsToTime(inserted) + a.Updated = util.MsToTime(updated) + return &a, nil + } +} + +func (db *DB) GetAccountsByOrgId(orgId string) ([]*types.Account, error) { + rows, err := db.Query("SELECT LOWER(HEX(id)),LOWER(HEX(orgId)),inserted,updated,name,LOWER(HEX(parent)),currency,`precision`,debitBalance FROM account WHERE orgId = UNHEX(?)", orgId) + + if err != nil { + return nil, err + } + + defer rows.Close() + + accounts := make([]*types.Account, 0) + + for rows.Next() { + a := new(types.Account) + var inserted int64 + var updated int64 + + err = rows.Scan(&a.Id, &a.OrgId, &inserted, &updated, &a.Name, &a.Parent, &a.Currency, &a.Precision, &a.DebitBalance) + if err != nil { + return nil, err + } + + if a.Parent == emptyAccountId { + a.Parent = "" + } + + a.Inserted = util.MsToTime(inserted) + a.Updated = util.MsToTime(updated) + + accounts = append(accounts, a) + } + err = rows.Err() + if err != nil { + return nil, err + } + + return accounts, nil +} + +func (db *DB) GetPermissionedAccountIds(orgId string, userId string, tokenId string) ([]string, error) { + // Get user permissions + // TODO incorporate tokens + rows, err := db.Query("SELECT LOWER(HEX(accountId)) FROM permission WHERE orgId = UNHEX(?) 
AND userId = UNHEX(?)", orgId, userId) + + if err != nil { + return nil, err + } + defer rows.Close() + + var permissionedAccounts []string + + var id string + + for rows.Next() { + err := rows.Scan(&id) + if err != nil { + return nil, err + } + + permissionedAccounts = append(permissionedAccounts, id) + } + err = rows.Err() + if err != nil { + return nil, err + } + + return permissionedAccounts, nil +} + +func (db *DB) GetSplitCountByAccountId(id string) (int64, error) { + var count int64 + + query := "SELECT COUNT(*) FROM split WHERE deleted = false AND accountId = UNHEX(?)" + + err := db.QueryRow(query, id).Scan(&count) + + return count, err +} + +func (db *DB) GetChildCountByAccountId(id string) (int64, error) { + var count int64 + query := "SELECT COUNT(*) FROM account WHERE parent = UNHEX(?)" + + err := db.QueryRow(query, id).Scan(&count) + + return count, err +} + +func (db *DB) DeleteAccount(id string) error { + query := "DELETE FROM account WHERE id = UNHEX(?)" + + _, err := db.Exec(query, id) + + return err +} + +func (db *DB) AddBalances(accounts []*types.Account, date time.Time) error { + // TODO optimize + ids := make([]string, len(accounts)) + + for i, account := range accounts { + ids[i] = "UNHEX(\"" + account.Id + "\")" + } + + balanceMap := make(map[string]*int64) + + query := "SELECT LOWER(HEX(accountId)), SUM(amount) FROM split WHERE deleted = false AND accountId IN (" + + strings.Join(ids, ",") + ")" + + " AND date < ? GROUP BY accountId" + + rows, err := db.Query(query, util.TimeToMs(date)) + + if err != nil { + return err + } + + defer rows.Close() + + for rows.Next() { + var id string + var balance int64 + err := rows.Scan(&id, &balance) + if err != nil { + return err + } + + balanceMap[id] = &balance + } + + err = rows.Err() + + if err != nil { + return err + } + + for _, account := range accounts { + account.Balance = balanceMap[account.Id] + } + + return nil +} + +func (db *DB) AddNativeBalancesCost(accounts []*types.Account, date time.Time) error { + // TODO optimize + ids := make([]string, len(accounts)) + + for i, account := range accounts { + ids[i] = "UNHEX(\"" + account.Id + "\")" + } + + balanceMap := make(map[string]*int64) + + query := "SELECT LOWER(HEX(accountId)), SUM(nativeAmount) FROM split WHERE deleted = false AND accountId IN (" + + strings.Join(ids, ",") + ")" + + " AND date < ? GROUP BY accountId" + + rows, err := db.Query(query, util.TimeToMs(date)) + + if err != nil { + return err + } + + defer rows.Close() + + for rows.Next() { + var id string + var balance int64 + err := rows.Scan(&id, &balance) + if err != nil { + return err + } + + balanceMap[id] = &balance + } + + err = rows.Err() + + if err != nil { + return err + } + + for _, account := range accounts { + account.NativeBalance = balanceMap[account.Id] + } + + return nil +} + +func (db *DB) AddNativeBalancesNearestInTime(accounts []*types.Account, date time.Time) error { + // TODO Don't look up org currency every single time + + for _, account := range accounts { + err := db.AddNativeBalanceNearestInTime(account, date) + if err != nil { + return err + } + } + return nil +} + +func (db *DB) AddBalance(account *types.Account, date time.Time) error { + var balance sql.NullInt64 + + query := "SELECT SUM(amount) FROM split WHERE deleted = false AND accountId = UNHEX(?) AND date < ?" 
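	// The balance is the sum of all non-deleted split amounts dated strictly
	// before the cutoff, kept as an integer in the account's smallest currency
	// unit; a NULL sum (no matching splits) scans into sql.NullInt64 and yields
	// a zero balance below.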
+ + err := db.QueryRow(query, account.Id, util.TimeToMs(date)).Scan(&balance) + + if err != nil { + return err + } + + account.Balance = &balance.Int64 + + return nil +} + +func (db *DB) AddNativeBalanceCost(account *types.Account, date time.Time) error { + var nativeBalance sql.NullInt64 + + query := "SELECT SUM(nativeAmount) FROM split WHERE deleted = false AND accountId = UNHEX(?) AND date < ?" + + err := db.QueryRow(query, account.Id, util.TimeToMs(date)).Scan(&nativeBalance) + + if err != nil { + return err + } + + account.NativeBalance = &nativeBalance.Int64 + + return nil +} + +func (db *DB) AddNativeBalanceNearestInTime(account *types.Account, date time.Time) error { + var orgCurrency string + var orgPrecision int + + query1 := "SELECT currency,`precision` FROM org WHERE id = UNHEX(?)" + + err := db.QueryRow(query1, account.OrgId).Scan(&orgCurrency, &orgPrecision) + + if err != nil { + return err + } + + if account.Balance == nil { + return nil + } + + if orgCurrency == account.Currency { + nativeBalance := int64(*account.Balance) + account.NativeBalance = &nativeBalance + return nil + } + + var tmp sql.NullInt64 + var price float64 + + query2 := "SELECT ABS(CAST(date AS SIGNED) - ?) AS datediff, price FROM price WHERE currency = ? ORDER BY datediff ASC LIMIT 1" + + err = db.QueryRow(query2, util.TimeToMs(date), account.Currency).Scan(&tmp, &price) + + if err == sql.ErrNoRows { + nativeBalance := int64(0) + account.NativeBalance = &nativeBalance + } else if err != nil { + return err + } + + precisionAdj := math.Pow(10, float64(account.Precision-orgPrecision)) + nativeBalance := int64(float64(*account.Balance) * price / precisionAdj) + account.NativeBalance = &nativeBalance + + return nil +} + +func (db *DB) GetRootAccount(orgId string) (*types.Account, error) { + a := types.Account{} + var inserted int64 + var updated int64 + + err := db.QueryRow( + "SELECT LOWER(HEX(id)),LOWER(HEX(orgId)),inserted,updated,name,LOWER(HEX(parent)),currency,`precision`,debitBalance FROM account WHERE orgId = UNHEX(?) AND parent = UNHEX(?)", + orgId, + emptyAccountId). 
+ Scan(&a.Id, &a.OrgId, &inserted, &updated, &a.Name, &a.Parent, &a.Currency, &a.Precision, &a.DebitBalance) + + a.Parent = "" + + switch { + case err == sql.ErrNoRows: + return nil, errors.New("Account not found") + case err != nil: + return nil, err + default: + a.Inserted = util.MsToTime(inserted) + a.Updated = util.MsToTime(updated) + return &a, nil + } +} diff --git a/core/model/db/apikey.go b/core/model/db/apikey.go new file mode 100644 index 0000000..5eee2ea --- /dev/null +++ b/core/model/db/apikey.go @@ -0,0 +1,132 @@ +package db + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "time" +) + +type ApiKeyInterface interface { + InsertApiKey(*types.ApiKey) error + UpdateApiKey(*types.ApiKey) error + DeleteApiKey(string, string) error + GetApiKeys(string) ([]*types.ApiKey, error) + UpdateApiKeyActivity(string) error +} + +const apiKeyFields = "LOWER(HEX(id)),inserted,updated,LOWER(HEX(userId)),label" + +func (db *DB) InsertApiKey(key *types.ApiKey) error { + key.Inserted = time.Now() + key.Updated = key.Inserted + + query := "INSERT INTO apikey(id,inserted,updated,userId,label) VALUES(UNHEX(?),?,?,UNHEX(?),?)" + res, err := db.Exec( + query, + key.Id, + util.TimeToMs(key.Inserted), + util.TimeToMs(key.Updated), + key.UserId, + key.Label, + ) + if err != nil { + return err + } + + rowCnt, err := res.RowsAffected() + if err != nil { + return err + } + + if rowCnt < 1 { + return errors.New("Unable to insert apikey into db") + } + + return nil +} + +func (db *DB) UpdateApiKey(key *types.ApiKey) error { + key.Updated = time.Now() + + query := "UPDATE apikey SET updated = ?, label = ? WHERE deleted IS NULL AND id = UNHEX(?)" + _, err := db.Exec( + query, + util.TimeToMs(key.Updated), + key.Label, + key.Id, + ) + + if err != nil { + return err + } + + var inserted int64 + + err = db.QueryRow("SELECT inserted FROM apikey WHERE id = UNHEX(?)", key.Id).Scan(&inserted) + + if err != nil { + return err + } + + key.Inserted = util.MsToTime(inserted) + + return nil +} + +func (db *DB) DeleteApiKey(id string, userId string) error { + query := "UPDATE apikey SET deleted = ? WHERE id = UNHEX(?) AND userId = UNHEX(?)" + _, err := db.Exec( + query, + util.TimeToMs(time.Now()), + id, + userId, + ) + + return err +} + +func (db *DB) GetApiKeys(userId string) ([]*types.ApiKey, error) { + rows, err := db.Query("SELECT "+apiKeyFields+" from apikey WHERE deleted IS NULL AND userId = UNHEX(?)", userId) + + if err != nil { + return nil, err + } + + defer rows.Close() + + keys := make([]*types.ApiKey, 0) + + for rows.Next() { + k := new(types.ApiKey) + var inserted int64 + var updated int64 + + err = rows.Scan(&k.Id, &inserted, &updated, &k.UserId, &k.Label) + if err != nil { + return nil, err + } + + k.Inserted = util.MsToTime(inserted) + k.Updated = util.MsToTime(updated) + + keys = append(keys, k) + } + err = rows.Err() + if err != nil { + return nil, err + } + + return keys, nil +} + +func (db *DB) UpdateApiKeyActivity(id string) error { + query := "UPDATE apikey SET updated = ? 
WHERE id = UNHEX(?)" + _, err := db.Exec( + query, + util.TimeToMs(time.Now()), + id, + ) + + return err +} diff --git a/core/model/db/db.go b/core/model/db/db.go new file mode 100644 index 0000000..242cf0e --- /dev/null +++ b/core/model/db/db.go @@ -0,0 +1,76 @@ +package db + +import ( + "database/sql" + _ "github.com/go-sql-driver/mysql" +) + +type DB struct { + *sql.DB +} + +type Datastore interface { + Escape(string) string + UserInterface + OrgInterface + AccountInterface + TransactionInterface + PriceInterface + SessionInterface + ApiKeyInterface +} + +func NewDB(dataSourceName string) (*DB, error) { + var err error + db, err := sql.Open("mysql", dataSourceName) + if err != nil { + return nil, err + } + + if err = db.Ping(); err != nil { + return nil, err + } + + return &DB{db}, nil +} + +func (db *DB) Escape(sql string) string { + dest := make([]byte, 0, 2*len(sql)) + var escape byte + for i := 0; i < len(sql); i++ { + c := sql[i] + + escape = 0 + + switch c { + case 0: /* Must be escaped for 'mysql' */ + escape = '0' + break + case '\n': /* Must be escaped for logs */ + escape = 'n' + break + case '\r': + escape = 'r' + break + case '\\': + escape = '\\' + break + case '\'': + escape = '\'' + break + case '"': /* Better safe than sorry */ + escape = '"' + break + case '\032': /* This gives problems on Win32 */ + escape = 'Z' + } + + if escape != 0 { + dest = append(dest, '\\', escape) + } else { + dest = append(dest, c) + } + } + + return string(dest) +} diff --git a/core/model/db/org.go b/core/model/db/org.go new file mode 100644 index 0000000..1706007 --- /dev/null +++ b/core/model/db/org.go @@ -0,0 +1,370 @@ +package db + +import ( + "database/sql" + "errors" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "time" +) + +type OrgInterface interface { + CreateOrg(*types.Org, string, []*types.Account) error + UpdateOrg(*types.Org) error + GetOrg(string, string) (*types.Org, error) + GetOrgs(string) ([]*types.Org, error) + GetOrgUserIds(string) ([]string, error) + InsertInvite(*types.Invite) error + AcceptInvite(*types.Invite, string) error + GetInvites(string) ([]*types.Invite, error) + GetInvite(string) (*types.Invite, error) + DeleteInvite(string) error +} + +const orgFields = "LOWER(HEX(o.id)),o.inserted,o.updated,o.name,o.currency,o.`precision`" +const inviteFields = "i.id,LOWER(HEX(i.orgId)),i.inserted,i.updated,i.email,i.accepted" + +func (db *DB) CreateOrg(org *types.Org, userId string, accounts []*types.Account) (err error) { + tx, err := db.Begin() + + if err != nil { + return + } + + defer func() { + if p := recover(); p != nil { + tx.Rollback() + panic(p) // re-throw panic after Rollback + } else if err != nil { + tx.Rollback() + } else { + err = tx.Commit() + } + }() + + org.Inserted = time.Now() + org.Updated = org.Inserted + + // create org + query1 := "INSERT INTO org(id,inserted,updated,name,currency,`precision`) VALUES(UNHEX(?),?,?,?,?,?)" + + res, err := tx.Exec( + query1, + org.Id, + util.TimeToMs(org.Inserted), + util.TimeToMs(org.Updated), + org.Name, + org.Currency, + org.Precision, + ) + + if err != nil { + return + } + + // associate user with org + query2 := "INSERT INTO userorg(userId,orgId,admin) VALUES(UNHEX(?),UNHEX(?), 1)" + + res, err = tx.Exec(query2, userId, org.Id) + + if err != nil { + return + } + + _, err = res.LastInsertId() + + if err != nil { + return + } + + // create Accounts: Root, Assets, Liabilities, Equity, Income, Expenses + + for _, account := range accounts { + + query := 
"INSERT INTO account(id,orgId,inserted,updated,name,parent,currency,`precision`,debitBalance) VALUES (UNHEX(?),UNHEX(?),?,?,?,UNHEX(?),?,?,?)" + + if _, err = tx.Exec( + query, + account.Id, + org.Id, + util.TimeToMs(org.Inserted), + util.TimeToMs(org.Updated), + account.Name, + account.Parent, + account.Currency, + account.Precision, + account.DebitBalance, + ); err != nil { + return + } + } + + permissionId, err := util.NewGuid() + + if err != nil { + return + } + + // Grant root permission to user + + query3 := "INSERT INTO permission (id,userId,orgId,accountId,type,inserted,updated) VALUES(UNHEX(?),UNHEX(?),UNHEX(?),UNHEX(?),?,?,?)" + + _, err = tx.Exec( + query3, + permissionId, + userId, + org.Id, + accounts[0].Id, + 0, + util.TimeToMs(org.Inserted), + util.TimeToMs(org.Updated), + ) + + return +} + +func (db *DB) UpdateOrg(org *types.Org) error { + org.Updated = time.Now() + + query := "UPDATE org SET updated = ?, name = ? WHERE id = UNHEX(?)" + _, err := db.Exec( + query, + util.TimeToMs(org.Updated), + org.Name, + org.Id, + ) + + return err +} + +func (db *DB) GetOrg(orgId string, userId string) (*types.Org, error) { + var o types.Org + var inserted int64 + var updated int64 + + err := db.QueryRow("SELECT "+orgFields+" FROM org o JOIN userorg ON userorg.orgId = o.id WHERE o.id = UNHEX(?) AND userorg.userId = UNHEX(?)", orgId, userId). + Scan(&o.Id, &inserted, &updated, &o.Name, &o.Currency, &o.Precision) + + switch { + case err == sql.ErrNoRows: + return nil, errors.New("Org not found") + case err != nil: + return nil, err + default: + o.Inserted = util.MsToTime(inserted) + o.Updated = util.MsToTime(updated) + return &o, nil + } +} + +func (db *DB) GetOrgs(userId string) ([]*types.Org, error) { + rows, err := db.Query("SELECT "+orgFields+" from org o JOIN userorg ON userorg.orgId = o.id WHERE userorg.userId = UNHEX(?)", userId) + + if err != nil { + return nil, err + } + + defer rows.Close() + + orgs := make([]*types.Org, 0) + + for rows.Next() { + o := new(types.Org) + var inserted int64 + var updated int64 + + err = rows.Scan(&o.Id, &inserted, &updated, &o.Name, &o.Currency, &o.Precision) + if err != nil { + return nil, err + } + + o.Inserted = util.MsToTime(inserted) + o.Updated = util.MsToTime(updated) + + orgs = append(orgs, o) + } + err = rows.Err() + if err != nil { + return nil, err + } + + return orgs, nil +} + +func (db *DB) GetOrgUserIds(orgId string) ([]string, error) { + rows, err := db.Query("SELECT LOWER(HEX(userId)) FROM userorg WHERE orgId = UNHEX(?)", orgId) + + if err != nil { + return nil, err + } + + defer rows.Close() + + userIds := make([]string, 0) + + for rows.Next() { + var userId string + err = rows.Scan(&userId) + if err != nil { + return nil, err + } + + userIds = append(userIds, userId) + } + + err = rows.Err() + if err != nil { + return nil, err + } + + return userIds, nil +} + +func (db *DB) InsertInvite(invite *types.Invite) error { + invite.Inserted = time.Now() + invite.Updated = invite.Inserted + + query := "INSERT INTO invite(id,orgId,inserted,updated,email,accepted) VALUES(?,UNHEX(?),?,?,?,?)" + _, err := db.Exec( + query, + invite.Id, + invite.OrgId, + util.TimeToMs(invite.Inserted), + util.TimeToMs(invite.Updated), + invite.Email, + false, + ) + + return err +} + +func (db *DB) AcceptInvite(invite *types.Invite, userId string) error { + invite.Updated = time.Now() + + // Get root account for permission + rootAccount, err := db.GetRootAccount(invite.OrgId) + + if err != nil { + return err + } + + tx, err := db.Begin() + + if err != nil { + 
return err + } + + defer func() { + if p := recover(); p != nil { + tx.Rollback() + panic(p) // re-throw panic after Rollback + } else if err != nil { + tx.Rollback() + } else { + err = tx.Commit() + } + }() + + // associate user with org + query1 := "INSERT INTO userorg(userId,orgId,admin) VALUES(UNHEX(?),UNHEX(?), 0)" + + _, err = tx.Exec(query1, userId, invite.OrgId) + + if err != nil { + return err + } + + query2 := "UPDATE invite SET accepted = 1, updated = ? WHERE id = ?" + + _, err = tx.Exec(query2, util.TimeToMs(invite.Updated), invite.Id) + + // Grant root permission to user + + permissionId, err := util.NewGuid() + + if err != nil { + return err + } + + query3 := "INSERT INTO permission (id,userId,orgId,accountId,type,inserted,updated) VALUES(UNHEX(?),UNHEX(?),UNHEX(?),UNHEX(?),?,?,?)" + + _, err = tx.Exec( + query3, + permissionId, + userId, + invite.OrgId, + rootAccount.Id, + 0, + util.TimeToMs(invite.Updated), + util.TimeToMs(invite.Updated), + ) + + return err +} + +func (db *DB) GetInvites(orgId string) ([]*types.Invite, error) { + // don't include expired invoices + cutoff := util.TimeToMs(time.Now()) - 7*24*60*60*1000 + + rows, err := db.Query("SELECT "+inviteFields+" FROM invite i WHERE orgId = UNHEX(?) AND inserted > ?", orgId, cutoff) + + if err != nil { + return nil, err + } + + defer rows.Close() + + invites := make([]*types.Invite, 0) + + for rows.Next() { + i := new(types.Invite) + var inserted int64 + var updated int64 + + err = rows.Scan(&i.Id, &i.OrgId, &inserted, &updated, &i.Email, &i.Accepted) + if err != nil { + return nil, err + } + + i.Inserted = util.MsToTime(inserted) + i.Updated = util.MsToTime(updated) + + invites = append(invites, i) + } + err = rows.Err() + if err != nil { + return nil, err + } + + return invites, nil +} + +func (db *DB) GetInvite(id string) (*types.Invite, error) { + var i types.Invite + var inserted int64 + var updated int64 + + err := db.QueryRow("SELECT "+inviteFields+" FROM invite i WHERE i.id = ?", id). + Scan(&i.Id, &i.OrgId, &inserted, &updated, &i.Email, &i.Accepted) + + switch { + case err == sql.ErrNoRows: + return nil, errors.New("Invite not found") + case err != nil: + return nil, err + default: + i.Inserted = util.MsToTime(inserted) + i.Updated = util.MsToTime(updated) + return &i, nil + } +} + +func (db *DB) DeleteInvite(id string) error { + query := "DELETE FROM invite WHERE id = ?" 
+ _, err := db.Exec( + query, + id, + ) + + return err +} diff --git a/core/model/db/price.go b/core/model/db/price.go new file mode 100644 index 0000000..55f3f2c --- /dev/null +++ b/core/model/db/price.go @@ -0,0 +1,156 @@ +package db + +import ( + "database/sql" + "errors" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "time" +) + +type PriceInterface interface { + InsertPrice(*types.Price) error + GetPriceById(string) (*types.Price, error) + DeletePrice(string) error + GetPricesNearestInTime(string, time.Time) ([]*types.Price, error) + GetPricesByCurrency(string, string) ([]*types.Price, error) +} + +const priceFields = "LOWER(HEX(p.id)),LOWER(HEX(p.orgId)),p.currency,p.date,p.inserted,p.updated,p.price" + +func (db *DB) InsertPrice(price *types.Price) error { + price.Inserted = time.Now() + price.Updated = price.Inserted + + if price.Date.IsZero() { + price.Date = price.Inserted + } + + query := "INSERT INTO price(id,orgId,currency,date,inserted,updated,price) VALUES(UNHEX(?),UNHEX(?),?,?,?,?,?)" + _, err := db.Exec( + query, + price.Id, + price.OrgId, + price.Currency, + util.TimeToMs(price.Date), + util.TimeToMs(price.Inserted), + util.TimeToMs(price.Updated), + price.Price, + ) + + return err +} + +func (db *DB) GetPriceById(id string) (*types.Price, error) { + var p types.Price + var date int64 + var inserted int64 + var updated int64 + + err := db.QueryRow("SELECT "+priceFields+" FROM price p WHERE id = UNHEX(?)", id). + Scan(&p.Id, &p.OrgId, &p.Currency, &date, &inserted, &updated, &p.Price) + + switch { + case err == sql.ErrNoRows: + return nil, errors.New("Price not found") + case err != nil: + return nil, err + default: + p.Date = util.MsToTime(date) + p.Inserted = util.MsToTime(inserted) + p.Updated = util.MsToTime(updated) + return &p, nil + } +} + +func (db *DB) DeletePrice(id string) error { + query := "DELETE FROM price WHERE id = UNHEX(?)" + + _, err := db.Exec(query, id) + + return err +} + +func (db *DB) GetPricesNearestInTime(orgId string, date time.Time) ([]*types.Price, error) { + qSelect := "SELECT " + priceFields + qFrom := " FROM price p" + qJoin := " LEFT OUTER JOIN price p2 ON p.currency = p2.currency AND p.orgId = p2.orgId AND ABS(CAST(p.date AS SIGNED) - ?) > ABS(CAST(p2.date AS SIGNED) - ?)" + qWhere := " WHERE p2.id IS NULL AND p.orgId = UNHEX(?)" + + query := qSelect + qFrom + qJoin + qWhere + + rows, err := db.Query(query, util.TimeToMs(date), util.TimeToMs(date), orgId) + + if err != nil { + return nil, err + } + + defer rows.Close() + + prices := make([]*types.Price, 0) + + for rows.Next() { + var date int64 + var inserted int64 + var updated int64 + p := new(types.Price) + err = rows.Scan(&p.Id, &p.OrgId, &p.Currency, &date, &inserted, &updated, &p.Price) + if err != nil { + return nil, err + } + + p.Date = util.MsToTime(date) + p.Inserted = util.MsToTime(inserted) + p.Updated = util.MsToTime(updated) + + prices = append(prices, p) + } + err = rows.Err() + if err != nil { + return nil, err + } + + return prices, nil +} + +func (db *DB) GetPricesByCurrency(orgId string, currency string) ([]*types.Price, error) { + qSelect := "SELECT " + priceFields + qFrom := " FROM price p" + qWhere := " WHERE p.orgId = UNHEX(?) AND p.currency = ?" 
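	// GetPricesNearestInTime above relies on an anti-join: each price row p is
	// left-joined to any row p2 of the same org and currency whose date is strictly
	// closer to the requested timestamp, and only rows with no better match
	// (p2.id IS NULL) are kept. A worked example with made-up numbers: BTC rows dated
	// 1000 ms and 5000 ms, queried at 1800 ms, give distances 800 and 3200, so only
	// the 1000 ms row survives; two equidistant rows would both survive because
	// neither strictly beats the other. Hypothetical caller, assuming a *DB handle:
	//
	//	prices, err := db.GetPricesNearestInTime(orgId, time.Now())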
+ pOrder := " ORDER BY date ASC" + + query := qSelect + qFrom + qWhere + pOrder + + rows, err := db.Query(query, orgId, currency) + + if err != nil { + return nil, err + } + + defer rows.Close() + + prices := make([]*types.Price, 0) + + for rows.Next() { + var date int64 + var inserted int64 + var updated int64 + p := new(types.Price) + err = rows.Scan(&p.Id, &p.OrgId, &p.Currency, &date, &inserted, &updated, &p.Price) + if err != nil { + return nil, err + } + + p.Date = util.MsToTime(date) + p.Inserted = util.MsToTime(inserted) + p.Updated = util.MsToTime(updated) + + prices = append(prices, p) + } + err = rows.Err() + if err != nil { + return nil, err + } + + return prices, nil +} diff --git a/core/model/db/session.go b/core/model/db/session.go new file mode 100644 index 0000000..9f3842d --- /dev/null +++ b/core/model/db/session.go @@ -0,0 +1,65 @@ +package db + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "time" +) + +type SessionInterface interface { + InsertSession(*types.Session) error + DeleteSession(string, string) error + UpdateSessionActivity(string) error +} + +func (db *DB) InsertSession(session *types.Session) error { + session.Inserted = time.Now() + session.Updated = session.Inserted + + query := "INSERT INTO session(id,inserted,updated,userId) VALUES(UNHEX(?),?,?,UNHEX(?))" + res, err := db.Exec( + query, + session.Id, + util.TimeToMs(session.Inserted), + util.TimeToMs(session.Updated), + session.UserId, + ) + if err != nil { + return err + } + + rowCnt, err := res.RowsAffected() + if err != nil { + return err + } + + if rowCnt < 1 { + return errors.New("Unable to insert session into db") + } + + return nil +} + +func (db *DB) DeleteSession(id string, userId string) error { + query := "UPDATE session SET `terminated` = ? WHERE id = UNHEX(?) AND userId = UNHEX(?)" + _, err := db.Exec( + query, + util.TimeToMs(time.Now()), + id, + userId, + ) + + return err +} + +func (db *DB) UpdateSessionActivity(id string) error { + query := "UPDATE session SET updated = ? 
WHERE id = UNHEX(?)" + _, err := db.Exec( + query, + util.TimeToMs(time.Now()), + id, + ) + + return err +} diff --git a/core/model/db/transaction.go b/core/model/db/transaction.go new file mode 100644 index 0000000..12efb6a --- /dev/null +++ b/core/model/db/transaction.go @@ -0,0 +1,558 @@ +package db + +import ( + "database/sql" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "strconv" + "strings" + "time" +) + +const txFields = "LOWER(HEX(id)),LOWER(HEX(orgId)),LOWER(HEX(userId)),date,inserted,updated,description,data,deleted" +const splitFields = "id,LOWER(HEX(transactionId)),LOWER(HEX(accountId)),date,inserted,updated,amount,nativeAmount,deleted" + +type TransactionInterface interface { + InsertTransaction(*types.Transaction) error + GetTransactionById(string) (*types.Transaction, error) + GetTransactionsByAccount(string, *types.QueryOptions) ([]*types.Transaction, error) + GetTransactionsByOrg(string, *types.QueryOptions, []string) ([]*types.Transaction, error) + DeleteTransaction(string) error + DeleteAndInsertTransaction(string, *types.Transaction) error +} + +func (db *DB) InsertTransaction(transaction *types.Transaction) (err error) { + // Save to db + dbTx, err := db.Begin() + + if err != nil { + return + } + + defer func() { + if p := recover(); p != nil { + dbTx.Rollback() + panic(p) // re-throw panic after Rollback + } else if err != nil { + dbTx.Rollback() + } else { + err = dbTx.Commit() + } + }() + + // save tx + query1 := "INSERT INTO transaction(id,orgId,userId,date,inserted,updated,description,data) VALUES(UNHEX(?),UNHEX(?),UNHEX(?),?,?,?,?,?)" + + _, err = dbTx.Exec( + query1, + transaction.Id, + transaction.OrgId, + transaction.UserId, + util.TimeToMs(transaction.Date), + util.TimeToMs(transaction.Inserted), + util.TimeToMs(transaction.Updated), + transaction.Description, + transaction.Data, + ) + + if err != nil { + return + } + + // save splits + for _, split := range transaction.Splits { + query := "INSERT INTO split(transactionId,accountId,date,inserted,updated,amount,nativeAmount) VALUES (UNHEX(?),UNHEX(?),?,?,?,?,?)" + + _, err = dbTx.Exec( + query, + transaction.Id, + split.AccountId, + util.TimeToMs(transaction.Date), + util.TimeToMs(transaction.Inserted), + util.TimeToMs(transaction.Updated), + split.Amount, + split.NativeAmount) + + if err != nil { + return + } + } + + return +} + +func (db *DB) GetTransactionById(id string) (*types.Transaction, error) { + row := db.QueryRow("SELECT "+txFields+" FROM transaction WHERE id = UNHEX(?)", id) + + t, err := db.unmarshalTransaction(row) + + if err != nil { + return nil, err + } + + rows, err := db.Query("SELECT "+splitFields+" FROM split WHERE transactionId = UNHEX(?) 
ORDER BY id", t.Id) + + if err != nil { + return nil, err + } + + t.Splits, err = db.unmarshalSplits(rows) + + if err != nil { + return nil, err + } + + return t, nil +} + +func (db *DB) GetTransactionsByAccount(accountId string, options *types.QueryOptions) ([]*types.Transaction, error) { + query := "SELECT LOWER(HEX(s.transactionId)) FROM split s" + + if options.DescriptionStartsWith != "" { + query = query + " JOIN transaction t ON t.id = s.transactionId" + } + + query = query + " WHERE s.accountId = UNHEX(?)" + + query = db.addOptionsToQuery(query, options) + + rows, err := db.Query(query, accountId) + + if err != nil { + return nil, err + } + + defer rows.Close() + + var ids []string + + for rows.Next() { + var id string + err = rows.Scan(&id) + if err != nil { + return nil, err + } + + ids = append(ids, "UNHEX(\""+id+"\")") + } + err = rows.Err() + if err != nil { + return nil, err + } + + if len(ids) == 0 { + return make([]*types.Transaction, 0), nil + } + + query = "SELECT " + txFields + " FROM transaction WHERE id IN (" + strings.Join(ids, ",") + ")" + + query = db.addSortToQuery(query, options) + + rows, err = db.Query(query) + + if err != nil { + return nil, err + } + + transactions, err := db.unmarshalTransactions(rows) + + if err != nil { + return nil, err + } + + transactionMap := make(map[string]*types.Transaction) + + for _, t := range transactions { + transactionMap[t.Id] = t + } + + rows, err = db.Query("SELECT " + splitFields + " FROM split WHERE transactionId IN (" + strings.Join(ids, ",") + ") ORDER BY id") + + if err != nil { + return nil, err + } + + splits, err := db.unmarshalSplits(rows) + + if err != nil { + return nil, err + } + + for _, s := range splits { + transaction := transactionMap[s.TransactionId] + transaction.Splits = append(transaction.Splits, s) + } + + return transactions, nil +} + +func (db *DB) GetTransactionsByOrg(orgId string, options *types.QueryOptions, accountIds []string) ([]*types.Transaction, error) { + if len(accountIds) == 0 { + return make([]*types.Transaction, 0), nil + } + + for i, accountId := range accountIds { + accountIds[i] = "UNHEX(\"" + accountId + "\")" + } + + query := "SELECT DISTINCT LOWER(HEX(s.transactionId)),s.date,s.inserted,s.updated FROM split s" + + if options.DescriptionStartsWith != "" { + query = query + " JOIN transaction t ON t.id = s.transactionId" + } + + query = query + " WHERE s.accountId IN (" + strings.Join(accountIds, ",") + ")" + + query = db.addOptionsToQuery(query, options) + + rows, err := db.Query(query) + + if err != nil { + return nil, err + } + + defer rows.Close() + + ids := []string{} + + for rows.Next() { + var id string + var date int64 + var inserted int64 + var updated int64 + err = rows.Scan(&id, &date, &inserted, &updated) + + if err != nil { + return nil, err + } + + ids = append(ids, "UNHEX(\""+id+"\")") + } + err = rows.Err() + if err != nil { + return nil, err + } + + if len(ids) == 0 { + return make([]*types.Transaction, 0), nil + } + + query = "SELECT " + txFields + " FROM transaction WHERE id IN (" + strings.Join(ids, ",") + ")" + + query = db.addSortToQuery(query, options) + + rows, err = db.Query(query) + + if err != nil { + return nil, err + } + + transactions, err := db.unmarshalTransactions(rows) + + if err != nil { + return nil, err + } + + transactionMap := make(map[string]*types.Transaction) + + for _, t := range transactions { + transactionMap[t.Id] = t + } + + rows, err = db.Query("SELECT " + splitFields + " FROM split WHERE transactionId IN (" + strings.Join(ids, ",") + ") 
ORDER BY id") + + if err != nil { + return nil, err + } + + splits, err := db.unmarshalSplits(rows) + + if err != nil { + return nil, err + } + + for _, s := range splits { + transaction := transactionMap[s.TransactionId] + transaction.Splits = append(transaction.Splits, s) + } + + return transactions, nil +} + +func (db *DB) DeleteTransaction(id string) (err error) { + dbTx, err := db.Begin() + + if err != nil { + return + } + + defer func() { + if p := recover(); p != nil { + dbTx.Rollback() + panic(p) // re-throw panic after Rollback + } else if err != nil { + dbTx.Rollback() + } else { + err = dbTx.Commit() + } + }() + + updatedTime := util.TimeToMs(time.Now()) + + // mark splits as deleted + + query1 := "UPDATE split SET updated = ?, deleted = true WHERE transactionId = UNHEX(?)" + + _, err = dbTx.Exec( + query1, + updatedTime, + id, + ) + + if err != nil { + return + } + + // mark transaction as deleted + + query2 := "UPDATE transaction SET updated = ?, deleted = true WHERE id = UNHEX(?)" + + _, err = dbTx.Exec( + query2, + updatedTime, + id, + ) + + if err != nil { + return + } + + return +} + +func (db *DB) DeleteAndInsertTransaction(oldId string, transaction *types.Transaction) (err error) { + // Save to db + dbTx, err := db.Begin() + + if err != nil { + return + } + + defer func() { + if p := recover(); p != nil { + dbTx.Rollback() + panic(p) // re-throw panic after Rollback + } else if err != nil { + dbTx.Rollback() + } else { + err = dbTx.Commit() + } + }() + + updatedTime := util.TimeToMs(transaction.Updated) + + // mark splits as deleted + + query1 := "UPDATE split SET updated = ?, deleted = true WHERE transactionId = UNHEX(?)" + + _, err = dbTx.Exec( + query1, + updatedTime, + oldId, + ) + + if err != nil { + return + } + + // mark transaction as deleted + + query2 := "UPDATE transaction SET updated = ?, deleted = true WHERE id = UNHEX(?)" + + _, err = dbTx.Exec( + query2, + updatedTime, + oldId, + ) + + if err != nil { + return + } + + // save new tx + query3 := "INSERT INTO transaction(id,orgId,userId,date,inserted,updated,description,data) VALUES(UNHEX(?),UNHEX(?),UNHEX(?),?,?,?,?,?)" + + _, err = dbTx.Exec( + query3, + transaction.Id, + transaction.OrgId, + transaction.UserId, + util.TimeToMs(transaction.Date), + util.TimeToMs(transaction.Inserted), + updatedTime, + transaction.Description, + transaction.Data, + ) + + if err != nil { + return + } + + // save splits + for _, split := range transaction.Splits { + query := "INSERT INTO split(transactionId,accountId,date,inserted,updated,amount,nativeAmount) VALUES (UNHEX(?),UNHEX(?),?,?,?,?,?)" + + _, err = dbTx.Exec( + query, + transaction.Id, + split.AccountId, + util.TimeToMs(transaction.Date), + util.TimeToMs(transaction.Inserted), + updatedTime, + split.Amount, + split.NativeAmount) + + if err != nil { + return + } + } + + return +} + +func (db *DB) unmarshalTransaction(row *sql.Row) (*types.Transaction, error) { + t := new(types.Transaction) + + var date int64 + var inserted int64 + var updated int64 + + err := row.Scan(&t.Id, &t.OrgId, &t.UserId, &date, &inserted, &updated, &t.Description, &t.Data, &t.Deleted) + + if err != nil { + return nil, err + } + + t.Date = util.MsToTime(date) + t.Inserted = util.MsToTime(inserted) + t.Updated = util.MsToTime(updated) + + return t, nil +} + +func (db *DB) unmarshalTransactions(rows *sql.Rows) ([]*types.Transaction, error) { + defer rows.Close() + + transactions := make([]*types.Transaction, 0) + + for rows.Next() { + t := new(types.Transaction) + var date int64 + var inserted 
int64 + var updated int64 + err := rows.Scan(&t.Id, &t.OrgId, &t.UserId, &date, &inserted, &updated, &t.Description, &t.Data, &t.Deleted) + if err != nil { + return nil, err + } + + t.Date = util.MsToTime(date) + t.Inserted = util.MsToTime(inserted) + t.Updated = util.MsToTime(updated) + transactions = append(transactions, t) + } + + err := rows.Err() + + if err != nil { + return nil, err + } + + return transactions, nil +} + +func (db *DB) unmarshalSplits(rows *sql.Rows) ([]*types.Split, error) { + defer rows.Close() + + splits := make([]*types.Split, 0) + + for rows.Next() { + s := new(types.Split) + var id int64 + var date int64 + var inserted int64 + var updated int64 + var deleted bool + err := rows.Scan(&id, &s.TransactionId, &s.AccountId, &date, &inserted, &updated, &s.Amount, &s.NativeAmount, &deleted) + if err != nil { + return nil, err + } + + splits = append(splits, s) + } + + err := rows.Err() + + if err != nil { + return nil, err + } + + return splits, nil +} + +func (db *DB) addOptionsToQuery(query string, options *types.QueryOptions) string { + if options.IncludeDeleted != true { + query += " AND s.deleted = false" + } + + if options.SinceInserted != 0 { + query += " AND s.inserted > " + strconv.Itoa(options.SinceInserted) + } + + if options.SinceUpdated != 0 { + query += " AND s.updated > " + strconv.Itoa(options.SinceUpdated) + } + + if options.BeforeInserted != 0 { + query += " AND s.inserted < " + strconv.Itoa(options.BeforeInserted) + } + + if options.BeforeUpdated != 0 { + query += " AND s.updated < " + strconv.Itoa(options.BeforeUpdated) + } + + if options.StartDate != 0 { + query += " AND s.date >= " + strconv.Itoa(options.StartDate) + } + + if options.EndDate != 0 { + query += " AND s.date < " + strconv.Itoa(options.EndDate) + } + + if options.DescriptionStartsWith != "" { + query += " AND t.description LIKE '" + db.Escape(options.DescriptionStartsWith) + "%'" + } + + if options.Sort == "updated-asc" { + query += " ORDER BY s.updated ASC" + } else { + query += " ORDER BY s.date DESC, s.inserted DESC" + } + + if options.Limit != 0 && options.Skip != 0 { + query += " LIMIT " + strconv.Itoa(options.Skip) + ", " + strconv.Itoa(options.Limit) + } else if options.Limit != 0 { + query += " LIMIT " + strconv.Itoa(options.Limit) + } + + return query +} + +func (db *DB) addSortToQuery(query string, options *types.QueryOptions) string { + if options.Sort == "updated-asc" { + query += " ORDER BY updated ASC" + } else { + query += " ORDER BY date DESC, inserted DESC" + } + + return query +} diff --git a/core/model/db/user.go b/core/model/db/user.go new file mode 100644 index 0000000..0fbf3aa --- /dev/null +++ b/core/model/db/user.go @@ -0,0 +1,264 @@ +package db + +import ( + "database/sql" + "errors" + "fmt" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "time" +) + +const userFields = "LOWER(HEX(u.id)),u.inserted,u.updated,u.firstName,u.lastName,u.email,u.passwordHash,u.agreeToTerms,u.passwordReset,u.emailVerified,u.emailVerifyCode" + +type UserInterface interface { + InsertUser(*types.User) error + VerifyUser(string) error + UpdateUser(*types.User) error + UpdateUserResetPassword(*types.User) error + GetVerifiedUserByEmail(string) (*types.User, error) + GetUserByActiveSession(string) (*types.User, error) + GetUserByApiKey(string) (*types.User, error) + GetUserByResetCode(string) (*types.User, error) + GetOrgAdmins(string) ([]*types.User, error) +} + +func (db *DB) InsertUser(user *types.User) error { + 
user.Inserted = time.Now() + user.Updated = user.Inserted + user.PasswordReset = "" + + query := "INSERT INTO user(id,inserted,updated,firstName,lastName,email,passwordHash,agreeToTerms,passwordReset,emailVerified,emailVerifyCode) VALUES(UNHEX(?),?,?,?,?,?,?,?,?,?,?)" + res, err := db.Exec( + query, + user.Id, + util.TimeToMs(user.Inserted), + util.TimeToMs(user.Updated), + user.FirstName, + user.LastName, + user.Email, + user.PasswordHash, + user.AgreeToTerms, + user.PasswordReset, + user.EmailVerified, + user.EmailVerifyCode, + ) + if err != nil { + return err + } + + rowCnt, err := res.RowsAffected() + if err != nil { + return err + } + + if rowCnt < 1 { + return errors.New("Unable to insert user into db") + } + + return nil +} + +func (db *DB) VerifyUser(code string) error { + query := "UPDATE user SET updated = ?, emailVerified = 1 WHERE emailVerifyCode = ?" + res, err := db.Exec( + query, + util.TimeToMs(time.Now()), + code, + ) + + // surface Exec and RowsAffected failures instead of silently treating them as success + if err != nil { + return err + } + + count, err := res.RowsAffected() + + if err != nil { + return err + } + + if count == 0 { + return errors.New("Invalid code") + } + + return nil +} + +func (db *DB) UpdateUser(user *types.User) error { + user.Updated = time.Now() + + query := "UPDATE user SET updated = ?, passwordHash = ?, passwordReset = ? WHERE id = UNHEX(?)" + _, err := db.Exec( + query, + util.TimeToMs(user.Updated), + user.PasswordHash, + "", + user.Id, + ) + + return err +} + +func (db *DB) UpdateUserResetPassword(user *types.User) error { + user.Updated = time.Now() + + query := "UPDATE user SET updated = ?, passwordReset = ? WHERE id = UNHEX(?)" + _, err := db.Exec( + query, + util.TimeToMs(user.Updated), + user.PasswordReset, + user.Id, + ) + + return err +} + +func (db *DB) GetVerifiedUserByEmail(email string) (*types.User, error) { + query := "SELECT " + userFields + " FROM user u WHERE email = ? AND emailVerified = 1" + + row := db.QueryRow(query, email) + u, err := db.unmarshalUser(row) + + if err != nil { + return nil, err + } + + return u, nil +} + +func (db *DB) GetUserByActiveSession(sessionId string) (*types.User, error) { + qSelect := "SELECT " + userFields + qFrom := " FROM user u" + qJoin := " JOIN session s ON s.userId = u.id" + qWhere := " WHERE s.terminated IS NULL AND s.id = UNHEX(?)" + + query := qSelect + qFrom + qJoin + qWhere + + row := db.QueryRow(query, sessionId) + u, err := db.unmarshalUser(row) + + if err != nil { + return nil, err + } + + return u, nil +} + +func (db *DB) GetUserByApiKey(keyId string) (*types.User, error) { + qSelect := "SELECT " + userFields + qFrom := " FROM user u" + qJoin := " JOIN apikey a ON a.userId = u.id" + qWhere := " WHERE a.deleted IS NULL AND a.id = UNHEX(?)" + + query := qSelect + qFrom + qJoin + qWhere + + row := db.QueryRow(query, keyId) + u, err := db.unmarshalUser(row) + + if err != nil { + return nil, err + } + + return u, nil +} + +func (db *DB) GetUserByResetCode(code string) (*types.User, error) { + qSelect := "SELECT " + userFields + qFrom := " FROM user u" + qWhere := " WHERE u.passwordReset = ?" 
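	// The qSelect/qFrom/qWhere fragments assembled here are concatenated into a
	// single statement below; user-supplied values only ever travel through ?
	// placeholders, so the SQL text itself stays fixed. For this lookup the
	// assembled query reads:
	//
	//	SELECT LOWER(HEX(u.id)),u.inserted,u.updated,u.firstName,u.lastName,u.email,
	//	       u.passwordHash,u.agreeToTerms,u.passwordReset,u.emailVerified,u.emailVerifyCode
	//	FROM user u
	//	WHERE u.passwordReset = ?
	//
	// The column order in userFields must stay in lockstep with the Scan order in
	// unmarshalUser; changing one without the other breaks every user query in this file.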
+ + query := qSelect + qFrom + qWhere + + row := db.QueryRow(query, code) + u, err := db.unmarshalUser(row) + + if err != nil { + return nil, err + } + + fmt.Println(u) + + return u, nil +} + +func (db *DB) GetOrgAdmins(orgId string) ([]*types.User, error) { + qSelect := "SELECT " + userFields + qFrom := " FROM user u" + qJoin := " JOIN userorg uo ON uo.userId = u.id" + qWhere := " WHERE uo.admin = true AND uo.orgId = UNHEX(?)" + + query := qSelect + qFrom + qJoin + qWhere + + rows, err := db.Query(query, orgId) + + if err != nil { + return nil, err + } + + return db.unmarshalUsers(rows) +} + +func (db *DB) unmarshalUser(row *sql.Row) (*types.User, error) { + u := new(types.User) + + var inserted int64 + var updated int64 + + err := row.Scan( + &u.Id, + &inserted, + &updated, + &u.FirstName, + &u.LastName, + &u.Email, + &u.PasswordHash, + &u.AgreeToTerms, + &u.PasswordReset, + &u.EmailVerified, + &u.EmailVerifyCode, + ) + + if err != nil { + return nil, err + } + + u.Inserted = util.MsToTime(inserted) + u.Updated = util.MsToTime(updated) + + return u, nil +} + +func (db *DB) unmarshalUsers(rows *sql.Rows) ([]*types.User, error) { + defer rows.Close() + + users := make([]*types.User, 0) + + for rows.Next() { + u := new(types.User) + var inserted int64 + var updated int64 + + err := rows.Scan( + &u.Id, + &inserted, + &updated, + &u.FirstName, + &u.LastName, + &u.Email, + &u.PasswordHash, + &u.AgreeToTerms, + &u.PasswordReset, + &u.EmailVerified, + &u.EmailVerifyCode, + ) + + if err != nil { + return nil, err + } + + u.Inserted = util.MsToTime(inserted) + u.Updated = util.MsToTime(updated) + + users = append(users, u) + } + + err := rows.Err() + + return users, err +} diff --git a/core/model/model.go b/core/model/model.go new file mode 100644 index 0000000..47629ba --- /dev/null +++ b/core/model/model.go @@ -0,0 +1,31 @@ +package model + +import ( + "github.com/openaccounting/oa-server/core/model/db" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" +) + +var Instance Interface + +type Model struct { + db db.Datastore + bcrypt util.Bcrypt + config types.Config +} + +type Interface interface { + UserInterface + OrgInterface + AccountInterface + TransactionInterface + PriceInterface + SessionInterface + ApiKeyInterface +} + +func NewModel(db db.Datastore, bcrypt util.Bcrypt, config types.Config) *Model { + model := &Model{db: db, bcrypt: bcrypt, config: config} + Instance = model + return model +} diff --git a/core/model/org.go b/core/model/org.go new file mode 100644 index 0000000..649705b --- /dev/null +++ b/core/model/org.go @@ -0,0 +1,294 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "time" +) + +type OrgInterface interface { + CreateOrg(*types.Org, string) error + UpdateOrg(*types.Org, string) error + GetOrg(string, string) (*types.Org, error) + GetOrgs(string) ([]*types.Org, error) + CreateInvite(*types.Invite, string) error + AcceptInvite(*types.Invite, string) error + GetInvites(string, string) ([]*types.Invite, error) + DeleteInvite(string, string) error +} + +func (model *Model) CreateOrg(org *types.Org, userId string) error { + if org.Name == "" { + return errors.New("name required") + } + + if org.Currency == "" { + return errors.New("currency required") + } + + accounts := make([]*types.Account, 6) + + id, err := util.NewGuid() + + if err != nil { + return err + } + + accounts[0] = &types.Account{ + Id: id, + Name: "Root", 
+ Parent: "", + Currency: org.Currency, + Precision: org.Precision, + DebitBalance: true, + } + + id, err = util.NewGuid() + + if err != nil { + return err + } + + accounts[1] = &types.Account{ + Id: id, + Name: "Assets", + Parent: accounts[0].Id, + Currency: org.Currency, + Precision: org.Precision, + DebitBalance: true, + } + + id, err = util.NewGuid() + + if err != nil { + return err + } + + accounts[2] = &types.Account{ + Id: id, + Name: "Liabilities", + Parent: accounts[0].Id, + Currency: org.Currency, + Precision: org.Precision, + DebitBalance: false, + } + + id, err = util.NewGuid() + + if err != nil { + return err + } + + accounts[3] = &types.Account{ + Id: id, + Name: "Equity", + Parent: accounts[0].Id, + Currency: org.Currency, + Precision: org.Precision, + DebitBalance: false, + } + + id, err = util.NewGuid() + + if err != nil { + return err + } + + accounts[4] = &types.Account{ + Id: id, + Name: "Income", + Parent: accounts[0].Id, + Currency: org.Currency, + Precision: org.Precision, + DebitBalance: false, + } + + id, err = util.NewGuid() + + if err != nil { + return err + } + + accounts[5] = &types.Account{ + Id: id, + Name: "Expenses", + Parent: accounts[0].Id, + Currency: org.Currency, + Precision: org.Precision, + DebitBalance: true, + } + + return model.db.CreateOrg(org, userId, accounts) +} + +func (model *Model) UpdateOrg(org *types.Org, userId string) error { + _, err := model.GetOrg(org.Id, userId) + + if err != nil { + // user doesn't have access to org + return errors.New("access denied") + } + + if org.Name == "" { + return errors.New("name required") + } + + return model.db.UpdateOrg(org) +} + +func (model *Model) GetOrg(orgId string, userId string) (*types.Org, error) { + return model.db.GetOrg(orgId, userId) +} + +func (model *Model) GetOrgs(userId string) ([]*types.Org, error) { + return model.db.GetOrgs(userId) +} + +func (model *Model) UserBelongsToOrg(userId string, orgId string) (bool, error) { + orgs, err := model.GetOrgs(userId) + + if err != nil { + return false, err + } + + belongs := false + + for _, org := range orgs { + if org.Id == orgId { + belongs = true + break + } + } + + return belongs, nil +} + +func (model *Model) CreateInvite(invite *types.Invite, userId string) error { + admins, err := model.db.GetOrgAdmins(invite.OrgId) + + if err != nil { + return err + } + + isAdmin := false + + for _, admin := range admins { + if admin.Id == userId { + isAdmin = true + break + } + } + + if isAdmin == false { + return errors.New("Must be org admin to invite users") + } + + inviteId, err := util.NewInviteId() + + if err != nil { + return err + } + + invite.Id = inviteId + + err = model.db.InsertInvite(invite) + + if err != nil { + return err + } + + if invite.Email != "" { + // TODO send email + } + + return nil +} + +func (model *Model) AcceptInvite(invite *types.Invite, userId string) error { + if invite.Accepted != true { + return errors.New("accepted must be true") + } + + if invite.Id == "" { + return errors.New("missing invite id") + } + + // Get original invite + original, err := model.db.GetInvite(invite.Id) + + if err != nil { + return err + } + + if original.Accepted == true { + return errors.New("invite already accepted") + } + + oneWeekAfter := original.Inserted.Add(time.Hour * 24 * 7) + + if time.Now().After(oneWeekAfter) == true { + return errors.New("invite has expired") + } + + invite.OrgId = original.OrgId + invite.Email = original.Email + invite.Inserted = original.Inserted + + return model.db.AcceptInvite(invite, userId) +} + +func 
(model *Model) GetInvites(orgId string, userId string) ([]*types.Invite, error) { + admins, err := model.db.GetOrgAdmins(orgId) + + if err != nil { + return nil, err + } + + isAdmin := false + + for _, admin := range admins { + if admin.Id == userId { + isAdmin = true + break + } + } + + if isAdmin == false { + return nil, errors.New("Must be org admin to invite users") + } + + return model.db.GetInvites(orgId) +} + +func (model *Model) DeleteInvite(id string, userId string) error { + // Get original invite + invite, err := model.db.GetInvite(id) + + if err != nil { + return err + } + + // make sure user has access + + admins, err := model.db.GetOrgAdmins(invite.OrgId) + + if err != nil { + return nil + } + + isAdmin := false + + for _, admin := range admins { + if admin.Id == userId { + isAdmin = true + break + } + } + + if isAdmin == false { + return errors.New("Must be org admin to delete invite") + } + + return model.db.DeleteInvite(id) +} diff --git a/core/model/org_test.go b/core/model/org_test.go new file mode 100644 index 0000000..7aa16a7 --- /dev/null +++ b/core/model/org_test.go @@ -0,0 +1,74 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/db" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/stretchr/testify/assert" + "testing" +) + +type TdOrg struct { + db.Datastore +} + +func (td *TdOrg) GetOrg(orgId string, userId string) (*types.Org, error) { + if userId == "1" { + return &types.Org{ + Id: "1", + Name: "MyOrg", + Currency: "USD", + Precision: 2, + }, nil + } else { + return nil, errors.New("not found") + } +} + +func (td *TdOrg) UpdateOrg(org *types.Org) error { + return nil +} + +func TestUpdateOrg(t *testing.T) { + tests := map[string]struct { + err error + org *types.Org + userId string + }{ + "success": { + err: nil, + org: &types.Org{ + Id: "1", + Name: "MyOrg2", + }, + userId: "1", + }, + "access denied": { + err: errors.New("access denied"), + org: &types.Org{ + Id: "1", + Name: "MyOrg2", + }, + userId: "2", + }, + "error": { + err: errors.New("name required"), + org: &types.Org{ + Id: "1", + Name: "", + }, + userId: "1", + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + td := &TdOrg{} + + model := NewModel(td, nil, types.Config{}) + + err := model.UpdateOrg(test.org, test.userId) + assert.Equal(t, test.err, err) + } +} diff --git a/core/model/price.go b/core/model/price.go new file mode 100644 index 0000000..0606045 --- /dev/null +++ b/core/model/price.go @@ -0,0 +1,117 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/ws" + "time" +) + +type PriceInterface interface { + CreatePrice(*types.Price, string) error + DeletePrice(string, string) error + GetPricesNearestInTime(string, time.Time, string) ([]*types.Price, error) + GetPricesByCurrency(string, string, string) ([]*types.Price, error) +} + +func (model *Model) CreatePrice(price *types.Price, userId string) error { + belongs, err := model.UserBelongsToOrg(userId, price.OrgId) + + if err != nil { + return err + } + + if belongs == false { + return errors.New("User does not belong to org") + } + + if price.Id == "" { + return errors.New("id required") + } + + if price.OrgId == "" { + return errors.New("orgId required") + } + + if price.Currency == "" { + return errors.New("currency required") + } + + err = model.db.InsertPrice(price) + + if err != nil { + return err + } + + // Notify web socket subscribers + userIds, err2 
:= model.db.GetOrgUserIds(price.OrgId) + + if err2 == nil { + ws.PushPrice(price, userIds, "create") + } + + return nil +} + +func (model *Model) DeletePrice(id string, userId string) error { + // Get original price + price, err := model.db.GetPriceById(id) + + if err != nil { + return err + } + + belongs, err := model.UserBelongsToOrg(userId, price.OrgId) + + if err != nil { + return err + } + + if belongs == false { + return errors.New("User does not belong to org") + } + + err = model.db.DeletePrice(id) + + if err != nil { + return err + } + + // Notify web socket subscribers + // TODO only get user ids that have permission to access account + userIds, err2 := model.db.GetOrgUserIds(price.OrgId) + + if err2 == nil { + ws.PushPrice(price, userIds, "delete") + } + + return nil +} + +func (model *Model) GetPricesNearestInTime(orgId string, date time.Time, userId string) ([]*types.Price, error) { + belongs, err := model.UserBelongsToOrg(userId, orgId) + + if err != nil { + return nil, err + } + + if belongs == false { + return nil, errors.New("User does not belong to org") + } + + return model.db.GetPricesNearestInTime(orgId, date) +} + +func (model *Model) GetPricesByCurrency(orgId string, currency string, userId string) ([]*types.Price, error) { + belongs, err := model.UserBelongsToOrg(userId, orgId) + + if err != nil { + return nil, err + } + + if belongs == false { + return nil, errors.New("User does not belong to org") + } + + return model.db.GetPricesByCurrency(orgId, currency) +} diff --git a/core/model/price_test.go b/core/model/price_test.go new file mode 100644 index 0000000..e3f09cc --- /dev/null +++ b/core/model/price_test.go @@ -0,0 +1,149 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/mocks" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "github.com/stretchr/testify/assert" + "testing" + "time" +) + +func TestCreatePrice(t *testing.T) { + + price := types.Price{ + "1", + "2", + "BTC", + time.Unix(0, 0), + time.Unix(0, 0), + time.Unix(0, 0), + 6700, + } + + badPrice := types.Price{ + "1", + "2", + "", + time.Unix(0, 0), + time.Unix(0, 0), + time.Unix(0, 0), + 6700, + } + + badOrg := types.Price{ + "1", + "1", + "BTC", + time.Unix(0, 0), + time.Unix(0, 0), + time.Unix(0, 0), + 6700, + } + + tests := map[string]struct { + err error + price types.Price + }{ + "successful": { + err: nil, + price: price, + }, + "with error": { + err: errors.New("currency required"), + price: badPrice, + }, + "with org error": { + err: errors.New("User does not belong to org"), + price: badOrg, + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + price := test.price + userId := "3" + + db := &mocks.Datastore{} + + db.On("GetOrgs", userId).Return([]*types.Org{ + { + Id: "2", + }, + }, nil) + + db.On("InsertPrice", &test.price).Return(nil) + + db.On("GetOrgUserIds", price.OrgId).Return([]string{userId}, nil) + + model := NewModel(db, &util.StandardBcrypt{}, types.Config{}) + + err := model.CreatePrice(&price, userId) + + assert.Equal(t, test.err, err) + } +} + +func TestDeletePrice(t *testing.T) { + + price := types.Price{ + "1", + "2", + "BTC", + time.Unix(0, 0), + time.Unix(0, 0), + time.Unix(0, 0), + 6700, + } + + tests := map[string]struct { + err error + userId string + price types.Price + }{ + "successful": { + err: nil, + price: price, + userId: "3", + }, + "with org error": { + err: errors.New("User does not belong to org"), + price: price, + userId: "4", + }, 
+ } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + price := test.price + + db := &mocks.Datastore{} + + db.On("GetPriceById", price.Id).Return(&price, nil) + + db.On("GetOrgs", "3").Return([]*types.Org{ + { + Id: "2", + }, + }, nil) + + db.On("GetOrgs", "4").Return([]*types.Org{ + { + Id: "7", + }, + }, nil) + + db.On("DeletePrice", price.Id).Return(nil) + + db.On("GetOrgUserIds", price.OrgId).Return([]string{test.userId}, nil) + + model := NewModel(db, &util.StandardBcrypt{}, types.Config{}) + + err := model.DeletePrice(price.Id, test.userId) + + assert.Equal(t, test.err, err) + } +} diff --git a/core/model/session.go b/core/model/session.go new file mode 100644 index 0000000..ee22f27 --- /dev/null +++ b/core/model/session.go @@ -0,0 +1,23 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/types" +) + +type SessionInterface interface { + CreateSession(*types.Session) error + DeleteSession(string, string) error +} + +func (model *Model) CreateSession(session *types.Session) error { + if session.Id == "" { + return errors.New("id required") + } + + return model.db.InsertSession(session) +} + +func (model *Model) DeleteSession(id string, userId string) error { + return model.db.DeleteSession(id, userId) +} diff --git a/core/model/transaction.go b/core/model/transaction.go new file mode 100644 index 0000000..103ad26 --- /dev/null +++ b/core/model/transaction.go @@ -0,0 +1,213 @@ +package model + +import ( + "errors" + "fmt" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/ws" + "time" +) + +type TransactionInterface interface { + CreateTransaction(*types.Transaction) error + UpdateTransaction(string, *types.Transaction) error + GetTransactionsByAccount(string, string, string, *types.QueryOptions) ([]*types.Transaction, error) + GetTransactionsByOrg(string, string, *types.QueryOptions) ([]*types.Transaction, error) + DeleteTransaction(string, string, string) error +} + +func (model *Model) CreateTransaction(transaction *types.Transaction) (err error) { + err = model.checkSplits(transaction) + + if err != nil { + return + } + + if transaction.Id == "" { + return errors.New("id required") + } + + transaction.Inserted = time.Now() + transaction.Updated = time.Now() + + if transaction.Date.IsZero() { + transaction.Date = transaction.Inserted + } + + err = model.db.InsertTransaction(transaction) + + if err != nil { + return + } + + // Notify web socket subscribers + // TODO only get user ids that have permission to access transaction + userIds, err2 := model.db.GetOrgUserIds(transaction.OrgId) + + if err2 == nil { + ws.PushTransaction(transaction, userIds, "create") + } + + return +} + +func (model *Model) UpdateTransaction(oldId string, transaction *types.Transaction) (err error) { + err = model.checkSplits(transaction) + + if err != nil { + return + } + + if oldId == "" || transaction.Id == "" { + return errors.New("id required") + } + + // Get original transaction + original, err := model.getTransactionById(oldId) + + if err != nil { + return + } + + transaction.Updated = time.Now() + transaction.Inserted = original.Inserted + + // We used to compare splits and if they hadn't changed just do an update + // on the transaction. The problem is then the updated field gets out of sync + // between the tranaction and its splits. 
+ // It needs to be in sync for getTransactionsByOrg() to work correctly with pagination + + // Delete old transaction and insert a new one + transaction.Inserted = transaction.Updated + err = model.db.DeleteAndInsertTransaction(oldId, transaction) + + if err != nil { + return + } + + // Notify web socket subscribers + // TODO only get user ids that have permission to access transaction + userIds, err2 := model.db.GetOrgUserIds(transaction.OrgId) + + if err2 == nil { + ws.PushTransaction(original, userIds, "delete") + ws.PushTransaction(transaction, userIds, "create") + } + + return +} + +func (model *Model) GetTransactionsByAccount(orgId string, userId string, accountId string, options *types.QueryOptions) ([]*types.Transaction, error) { + userAccounts, err := model.GetAccounts(orgId, userId, "") + + if err != nil { + return nil, err + } + + if !model.accountsContainWriteAccess(userAccounts, accountId) { + return nil, errors.New(fmt.Sprintf("%s %s", "user does not have permission to access account", accountId)) + } + + return model.db.GetTransactionsByAccount(accountId, options) + +} + +func (model *Model) GetTransactionsByOrg(orgId string, userId string, options *types.QueryOptions) ([]*types.Transaction, error) { + userAccounts, err := model.GetAccounts(orgId, userId, "") + + if err != nil { + return nil, err + } + + var accountIds []string + for _, account := range userAccounts { + accountIds = append(accountIds, account.Id) + } + + return model.db.GetTransactionsByOrg(orgId, options, accountIds) +} + +func (model *Model) DeleteTransaction(id string, userId string, orgId string) (err error) { + transaction, err := model.getTransactionById(id) + + if err != nil { + return + } + + userAccounts, err := model.GetAccounts(orgId, userId, "") + + if err != nil { + return + } + + for _, split := range transaction.Splits { + if !model.accountsContainWriteAccess(userAccounts, split.AccountId) { + return errors.New(fmt.Sprintf("%s %s", "user does not have permission to access account", split.AccountId)) + } + } + + err = model.db.DeleteTransaction(id) + + if err != nil { + return + } + + // Notify web socket subscribers + // TODO only get user ids that have permission to access transaction + userIds, err2 := model.db.GetOrgUserIds(transaction.OrgId) + + if err2 == nil { + ws.PushTransaction(transaction, userIds, "delete") + } + + return +} + +func (model *Model) getTransactionById(id string) (*types.Transaction, error) { + // TODO if this is made public, make a separate version that checks permission + return model.db.GetTransactionById(id) +} + +func (model *Model) checkSplits(transaction *types.Transaction) (err error) { + if len(transaction.Splits) < 2 { + return errors.New("at least 2 splits are required") + } + + org, err := model.GetOrg(transaction.OrgId, transaction.UserId) + + if err != nil { + return + } + + userAccounts, err := model.GetAccounts(transaction.OrgId, transaction.UserId, "") + + if err != nil { + return + } + + var amount int64 = 0 + + for _, split := range transaction.Splits { + if !model.accountsContainWriteAccess(userAccounts, split.AccountId) { + return errors.New(fmt.Sprintf("%s %s", "user does not have permission to access account", split.AccountId)) + } + + account := model.getAccountFromList(userAccounts, split.AccountId) + + if account.HasChildren == true { + return errors.New("Cannot use parent account for split") + } + + if account.Currency == org.Currency && split.NativeAmount != split.Amount { + return errors.New("nativeAmount must equal amount for native 
currency splits") + } + + amount += split.NativeAmount + } + + if amount != 0 { + return errors.New("splits must add up to 0") + } + + return +} diff --git a/core/model/transaction_test.go b/core/model/transaction_test.go new file mode 100644 index 0000000..7546f4c --- /dev/null +++ b/core/model/transaction_test.go @@ -0,0 +1,141 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/db" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "testing" + "time" +) + +type TdTransaction struct { + db.Datastore + mock.Mock +} + +func (td *TdTransaction) GetOrg(orgId string, userId string) (*types.Org, error) { + org := &types.Org{ + Currency: "USD", + } + + return org, nil +} + +func (td *TdTransaction) GetPermissionedAccountIds(userId string, orgId string, tokenId string) ([]string, error) { + return []string{"1", "2"}, nil +} + +func (td *TdTransaction) GetAccountsByOrgId(orgId string) ([]*types.Account, error) { + return []*types.Account{&types.Account{Id: "1", Currency: "USD"}, &types.Account{Id: "2"}}, nil +} + +func (td *TdTransaction) InsertTransaction(transaction *types.Transaction) (err error) { + return nil +} + +func (td *TdTransaction) GetTransactionById(id string) (*types.Transaction, error) { + args := td.Called(id) + return args.Get(0).(*types.Transaction), args.Error(1) +} + +func (td *TdTransaction) UpdateTransaction(oldId string, transaction *types.Transaction) error { + args := td.Called(oldId, transaction) + return args.Error(0) +} + +func (td *TdTransaction) GetOrgUserIds(id string) ([]string, error) { + return []string{"1"}, nil +} + +func TestCreateTransaction(t *testing.T) { + tests := map[string]struct { + err error + tx *types.Transaction + }{ + "successful": { + err: nil, + tx: &types.Transaction{ + "1", + "2", + "3", + time.Now(), + time.Now(), + time.Now(), + "description", + "", + false, + []*types.Split{ + &types.Split{"1", "1", 1000, 1000}, + &types.Split{"1", "2", -1000, -1000}, + }, + }, + }, + "bad split amounts": { + err: errors.New("splits must add up to 0"), + tx: &types.Transaction{ + "1", + "2", + "3", + time.Now(), + time.Now(), + time.Now(), + "description", + "", + false, + []*types.Split{ + &types.Split{"1", "1", 1000, 1000}, + &types.Split{"1", "2", -500, -500}, + }, + }, + }, + "lacking permission": { + err: errors.New("user does not have permission to access account 3"), + tx: &types.Transaction{ + "1", + "2", + "3", + time.Now(), + time.Now(), + time.Now(), + "description", + "", + false, + []*types.Split{ + &types.Split{"1", "1", 1000, 1000}, + &types.Split{"1", "3", -1000, -1000}, + }, + }, + }, + "nativeAmount mismatch": { + err: errors.New("nativeAmount must equal amount for native currency splits"), + tx: &types.Transaction{ + "1", + "2", + "3", + time.Now(), + time.Now(), + time.Now(), + "description", + "", + false, + []*types.Split{ + &types.Split{"1", "1", 1000, 500}, + &types.Split{"1", "2", -1000, -500}, + }, + }, + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + td := &TdTransaction{} + model := NewModel(td, nil, types.Config{}) + + err := model.CreateTransaction(test.tx) + + assert.Equal(t, err, test.err) + } +} diff --git a/core/model/types/account.go b/core/model/types/account.go new file mode 100644 index 0000000..f02668b --- /dev/null +++ b/core/model/types/account.go @@ -0,0 +1,31 @@ +package types + +import ( + "time" +) + +type Account struct { + Id string `json:"id"` + 
OrgId string `json:"orgId"` + Inserted time.Time `json:"inserted"` + Updated time.Time `json:"updated"` + Name string `json:"name"` + Parent string `json:"parent"` + Currency string `json:"currency"` + Precision int `json:"precision"` + DebitBalance bool `json:"debitBalance"` + Balance *int64 `json:"balance"` + NativeBalance *int64 `json:"nativeBalance"` + ReadOnly bool `json:"readOnly"` + HasChildren bool `json:"-"` +} + +type AccountNode struct { + Account *Account + Parent *AccountNode + Children []*AccountNode +} + +func NewAccount() *Account { + return &Account{Precision: 2} +} diff --git a/core/model/types/apikey.go b/core/model/types/apikey.go new file mode 100644 index 0000000..fab6f44 --- /dev/null +++ b/core/model/types/apikey.go @@ -0,0 +1,15 @@ +package types + +import ( + "github.com/go-sql-driver/mysql" + "time" +) + +type ApiKey struct { + Id string `json:"id"` + Inserted time.Time `json:"inserted"` + Updated time.Time `json:"updated"` + UserId string `json:"userId"` + Label string `json:"label"` + Deleted mysql.NullTime `json:"-"` // Can we marshal this correctly? +} diff --git a/core/model/types/config.go b/core/model/types/config.go new file mode 100644 index 0000000..793ae52 --- /dev/null +++ b/core/model/types/config.go @@ -0,0 +1,14 @@ +package types + +type Config struct { + WebUrl string + Port int + KeyFile string + CertFile string + Database string + User string + Password string + SendgridKey string + SendgridEmail string + SendgridSender string +} diff --git a/core/model/types/invite.go b/core/model/types/invite.go new file mode 100644 index 0000000..72b88e1 --- /dev/null +++ b/core/model/types/invite.go @@ -0,0 +1,14 @@ +package types + +import ( + "time" +) + +type Invite struct { + Id string `json:"id"` + OrgId string `json:"orgId"` + Inserted time.Time `json:"inserted"` + Updated time.Time `json:"updated"` + Email string `json:"email"` + Accepted bool `json:"accepted"` +} diff --git a/core/model/types/org.go b/core/model/types/org.go new file mode 100644 index 0000000..124bab5 --- /dev/null +++ b/core/model/types/org.go @@ -0,0 +1,14 @@ +package types + +import ( + "time" +) + +type Org struct { + Id string `json:"id"` + Inserted time.Time `json:"inserted"` + Updated time.Time `json:"updated"` + Name string `json:"name"` + Currency string `json:"currency"` + Precision int `json:"precision"` +} diff --git a/core/model/types/price.go b/core/model/types/price.go new file mode 100644 index 0000000..3075227 --- /dev/null +++ b/core/model/types/price.go @@ -0,0 +1,15 @@ +package types + +import ( + "time" +) + +type Price struct { + Id string `json:"id"` + OrgId string `json:"orgId"` + Currency string `json:"currency"` + Date time.Time `json:"date"` + Inserted time.Time `json:"inserted"` + Updated time.Time `json:"updated"` + Price float64 `json:"price"` +} diff --git a/core/model/types/query_options.go b/core/model/types/query_options.go new file mode 100644 index 0000000..54b85cf --- /dev/null +++ b/core/model/types/query_options.go @@ -0,0 +1,104 @@ +package types + +import ( + "net/url" + "strconv" +) + +type QueryOptions struct { + Limit int `json:"limit"` + Skip int `json:"skip"` + SinceInserted int `json:"sinceInserted"` + SinceUpdated int `json:"sinceUpdated"` + BeforeInserted int `json:"beforeInserted"` + BeforeUpdated int `json:"beforeUpdated"` + StartDate int `json:"startDate"` + EndDate int `json:"endDate"` + DescriptionStartsWith string `json:"descriptionStartsWith"` + IncludeDeleted bool `json:"includeDeleted"` + Sort string `json:"string"` +} + +func 
QueryOptionsFromURLQuery(urlQuery url.Values) (*QueryOptions, error) { + qo := &QueryOptions{} + + var err error + + if urlQuery.Get("limit") != "" { + qo.Limit, err = strconv.Atoi(urlQuery.Get("limit")) + + if err != nil { + return nil, err + } + } + + if urlQuery.Get("skip") != "" { + qo.Skip, err = strconv.Atoi(urlQuery.Get("skip")) + + if err != nil { + return nil, err + } + } + + if urlQuery.Get("sinceInserted") != "" { + qo.SinceInserted, err = strconv.Atoi(urlQuery.Get("sinceInserted")) + + if err != nil { + return nil, err + } + } + + if urlQuery.Get("sinceUpdated") != "" { + qo.SinceUpdated, err = strconv.Atoi(urlQuery.Get("sinceUpdated")) + + if err != nil { + return nil, err + } + } + + if urlQuery.Get("beforeInserted") != "" { + qo.BeforeInserted, err = strconv.Atoi(urlQuery.Get("beforeInserted")) + + if err != nil { + return nil, err + } + } + + if urlQuery.Get("beforeUpdated") != "" { + qo.BeforeUpdated, err = strconv.Atoi(urlQuery.Get("beforeUpdated")) + + if err != nil { + return nil, err + } + } + + if urlQuery.Get("startDate") != "" { + qo.StartDate, err = strconv.Atoi(urlQuery.Get("startDate")) + + if err != nil { + return nil, err + } + } + + if urlQuery.Get("endDate") != "" { + qo.EndDate, err = strconv.Atoi(urlQuery.Get("endDate")) + + if err != nil { + return nil, err + } + } + + if urlQuery.Get("descriptionStartsWith") != "" { + qo.DescriptionStartsWith = urlQuery.Get("descriptionStartsWith") + } + + if urlQuery.Get("includeDeleted") == "true" { + qo.IncludeDeleted = true + } + + if urlQuery.Get("sort") != "" { + qo.Sort = urlQuery.Get("sort") + } + + return qo, nil +} diff --git a/core/model/types/session.go b/core/model/types/session.go new file mode 100644 index 0000000..b4ad783 --- /dev/null +++ b/core/model/types/session.go @@ -0,0 +1,14 @@ +package types + +import ( + "github.com/go-sql-driver/mysql" + "time" +) + +type Session struct { + Id string `json:"id"` + Inserted time.Time `json:"inserted"` + Updated time.Time `json:"updated"` + UserId string `json:"userId"` + Terminated mysql.NullTime `json:"-"` // Can we marshal this correctly? 
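+ // One option (not implemented here) would be a custom MarshalJSON on a small wrapper around mysql.NullTime that emits null or an RFC 3339 timestamp.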
+} diff --git a/core/model/types/transaction.go b/core/model/types/transaction.go new file mode 100644 index 0000000..c094e59 --- /dev/null +++ b/core/model/types/transaction.go @@ -0,0 +1,25 @@ +package types + +import ( + "time" +) + +type Transaction struct { + Id string `json:"id"` + OrgId string `json:"orgId"` + UserId string `json:"userId"` + Date time.Time `json:"date"` + Inserted time.Time `json:"inserted"` + Updated time.Time `json:"updated"` + Description string `json:"description"` + Data string `json:"data"` + Deleted bool `json:"deleted"` + Splits []*Split `json:"splits"` +} + +type Split struct { + TransactionId string `json:"-"` + AccountId string `json:"accountId"` + Amount int64 `json:"amount"` + NativeAmount int64 `json:"nativeAmount"` +} diff --git a/core/model/types/user.go b/core/model/types/user.go new file mode 100644 index 0000000..912769e --- /dev/null +++ b/core/model/types/user.go @@ -0,0 +1,20 @@ +package types + +import ( + "time" +) + +type User struct { + Id string `json:"id"` + Inserted time.Time `json:"inserted"` + Updated time.Time `json:"updated"` + FirstName string `json:"firstName"` + LastName string `json:"lastName"` + Email string `json:"email"` + Password string `json:"password"` + PasswordHash string `json:"-"` + AgreeToTerms bool `json:"agreeToTerms"` + PasswordReset string `json:"-"` + EmailVerified bool `json:"emailVerified"` + EmailVerifyCode string `json:"-"` +} diff --git a/core/model/user.go b/core/model/user.go new file mode 100644 index 0000000..20b5553 --- /dev/null +++ b/core/model/user.go @@ -0,0 +1,228 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" + "log" +) + +type UserInterface interface { + CreateUser(user *types.User) error + VerifyUser(string) error + UpdateUser(user *types.User) error + ResetPassword(email string) error + ConfirmResetPassword(string, string) (*types.User, error) +} + +func (model *Model) CreateUser(user *types.User) error { + if user.Id == "" { + return errors.New("id required") + } + + if user.FirstName == "" { + return errors.New("first name required") + } + + if user.LastName == "" { + return errors.New("last name required") + } + + if user.Email == "" { + return errors.New("email required") + } + + if user.Password == "" { + return errors.New("password required") + } + + if user.AgreeToTerms != true { + return errors.New("must agree to terms") + } + + // hash password + // bcrypt's function also generates a salt + + passwordHash, err := model.bcrypt.GenerateFromPassword([]byte(user.Password), model.bcrypt.GetDefaultCost()) + if err != nil { + return err + } + + user.PasswordHash = string(passwordHash) + user.Password = "" + user.EmailVerified = false + user.EmailVerifyCode, err = util.NewGuid() + + if err != nil { + return err + } + + err = model.db.InsertUser(user) + + if err != nil { + return err + } + + err = model.SendVerificationEmail(user) + + if err != nil { + log.Println(err) + } + + return nil +} + +func (model *Model) VerifyUser(code string) error { + if code == "" { + return errors.New("code required") + } + + return model.db.VerifyUser(code) +} + +func (model *Model) UpdateUser(user *types.User) error { + if user.Id == "" { + return errors.New("id required") + } + + if user.Password == "" { + return errors.New("password required") + } + + // hash password + // bcrypt's function also generates a salt + + 
passwordHash, err := model.bcrypt.GenerateFromPassword([]byte(user.Password), model.bcrypt.GetDefaultCost()) + if err != nil { + return err + } + + user.PasswordHash = string(passwordHash) + user.Password = "" + + return model.db.UpdateUser(user) +} + +func (model *Model) ResetPassword(email string) error { + if email == "" { + return errors.New("email required") + } + + user, err := model.db.GetVerifiedUserByEmail(email) + + if err != nil { + // Don't send back error so people can't try to find user accounts + log.Printf("Invalid email for reset password " + email) + return nil + } + + user.PasswordReset, err = util.NewGuid() + + if err != nil { + return err + } + + err = model.db.UpdateUserResetPassword(user) + + if err != nil { + return err + } + + return model.SendPasswordResetEmail(user) +} + +func (model *Model) ConfirmResetPassword(password string, code string) (*types.User, error) { + if password == "" { + return nil, errors.New("password required") + } + + if code == "" { + return nil, errors.New("code required") + } + + user, err := model.db.GetUserByResetCode(code) + + if err != nil { + return nil, errors.New("Invalid code") + } + + passwordHash, err := model.bcrypt.GenerateFromPassword([]byte(password), model.bcrypt.GetDefaultCost()) + if err != nil { + return nil, err + } + + user.PasswordHash = string(passwordHash) + user.Password = "" + + err = model.db.UpdateUser(user) + + if err != nil { + return nil, err + } + + return user, nil +} + +func (model *Model) SendVerificationEmail(user *types.User) error { + log.Println("Sending verification email to " + user.Email) + + link := model.config.WebUrl + "/user/verify?code=" + user.EmailVerifyCode + + from := mail.NewEmail(model.config.SendgridSender, model.config.SendgridEmail) + subject := "Verify your email" + to := mail.NewEmail(user.FirstName+" "+user.LastName, user.Email) + + plainTextContent := "Thank you for signing up with Open Accounting! " + + "Please click on the link below to verify your email address:\n\n" + link + htmlContent := "Thank you for signing up with Open Accounting! " + + "Please click on the link below to verify your email address:
<br><br>
" + + "" + link + "" + + message := mail.NewSingleEmail(from, subject, to, plainTextContent, htmlContent) + client := sendgrid.NewSendClient(model.config.SendgridKey) + response, err := client.Send(message) + + if err != nil { + return err + } + + log.Println(response.StatusCode) + log.Println(response.Body) + log.Println(response.Headers) + + return nil +} + +func (model *Model) SendPasswordResetEmail(user *types.User) error { + log.Println("Sending password reset email to " + user.Email) + + link := model.config.WebUrl + "/user/reset-password?code=" + user.PasswordReset + + from := mail.NewEmail(model.config.SendgridSender, model.config.SendgridEmail) + subject := "Reset password" + to := mail.NewEmail(user.FirstName+" "+user.LastName, user.Email) + + plainTextContent := "Please click the following link to reset your password:\n\n" + link + + "If you did not request to have your password reset, please ignore this email and " + + "nothing will happen." + htmlContent := "Please click the following link to reset your password:
<br><br>
\n" + + "<a href=\"" + link + "\">" + link + "</a><br><br>
\n" + + "If you did not request to have your password reset, please ignore this email and " + + "nothing will happen." + + message := mail.NewSingleEmail(from, subject, to, plainTextContent, htmlContent) + client := sendgrid.NewSendClient(model.config.SendgridKey) + response, err := client.Send(message) + + if err != nil { + return err + } + + log.Println(response.StatusCode) + log.Println(response.Body) + log.Println(response.Headers) + + return nil +} diff --git a/core/model/user_test.go b/core/model/user_test.go new file mode 100644 index 0000000..6d82e26 --- /dev/null +++ b/core/model/user_test.go @@ -0,0 +1,177 @@ +package model + +import ( + "errors" + "github.com/openaccounting/oa-server/core/mocks" + "github.com/openaccounting/oa-server/core/model/db" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/stretchr/testify/assert" + "testing" + "time" +) + +type TdUser struct { + db.Datastore + testNum int +} + +func (td *TdUser) InsertUser(user *types.User) error { + return nil +} + +func (td *TdUser) UpdateUser(user *types.User) error { + return nil +} + +func TestCreateUser(t *testing.T) { + + // Id string `json:"id"` + // Inserted time.Time `json:"inserted"` + // Updated time.Time `json:"updated"` + // FirstName string `json:"firstName"` + // LastName string `json:"lastName"` + // Email string `json:"email"` + // Password string `json:"password"` + // PasswordHash string `json:"-"` + // AgreeToTerms bool `json:"agreeToTerms"` + // PasswordReset string `json:"-"` + // EmailVerified bool `json:"emailVerified"` + // EmailVerifyCode string `json:"-"` + + user := types.User{ + "0", + time.Unix(0, 0), + time.Unix(0, 0), + "John", + "Doe", + "johndoe@email.com", + "password", + "", + true, + "", + false, + "", + } + + badUser := types.User{ + "0", + time.Unix(0, 0), + time.Unix(0, 0), + "John", + "Doe", + "", + "password", + "", + true, + "", + false, + "", + } + + tests := map[string]struct { + err error + user types.User + }{ + "successful": { + err: nil, + user: user, + }, + "with error": { + err: errors.New("email required"), + user: badUser, + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + user := test.user + + mockBcrypt := new(mocks.Bcrypt) + + mockBcrypt.On("GetDefaultCost").Return(10) + + mockBcrypt.On("GenerateFromPassword", []byte(user.Password), 10). + Return(make([]byte, 0), nil) + + model := NewModel(&TdUser{}, mockBcrypt, types.Config{}) + + err := model.CreateUser(&user) + + assert.Equal(t, err, test.err) + + if err == nil { + mockBcrypt.AssertExpectations(t) + } + } +} + +func TestUpdateUser(t *testing.T) { + + user := types.User{ + "0", + time.Unix(0, 0), + time.Unix(0, 0), + "John2", + "Doe", + "johndoe@email.com", + "password", + "", + true, + "", + false, + "", + } + + badUser := types.User{ + "0", + time.Unix(0, 0), + time.Unix(0, 0), + "John2", + "Doe", + "johndoe@email.com", + "", + "", + true, + "", + false, + "", + } + + tests := map[string]struct { + err error + user types.User + }{ + "successful": { + err: nil, + user: user, + }, + "with error": { + err: errors.New("password required"), + user: badUser, + }, + } + + for name, test := range tests { + t.Logf("Running test case: %s", name) + + user := test.user + + mockBcrypt := new(mocks.Bcrypt) + + mockBcrypt.On("GetDefaultCost").Return(10) + + mockBcrypt.On("GenerateFromPassword", []byte(user.Password), 10). 
+ Return(make([]byte, 0), nil) + + model := NewModel(&TdUser{}, mockBcrypt, types.Config{}) + + err := model.UpdateUser(&user) + + assert.Equal(t, err, test.err) + + if err == nil { + mockBcrypt.AssertExpectations(t) + } + } +} diff --git a/core/server.go b/core/server.go new file mode 100644 index 0000000..1f1ae41 --- /dev/null +++ b/core/server.go @@ -0,0 +1,50 @@ +package main + +import ( + "encoding/json" + "github.com/openaccounting/oa-server/core/api" + "github.com/openaccounting/oa-server/core/auth" + "github.com/openaccounting/oa-server/core/model" + "github.com/openaccounting/oa-server/core/model/db" + "github.com/openaccounting/oa-server/core/model/types" + "github.com/openaccounting/oa-server/core/util" + "log" + "net/http" + "os" + "strconv" + //"fmt" +) + +func main() { + //filename is the path to the json config file + var config types.Config + file, err := os.Open("./config.json") + + if err != nil { + log.Fatal(err) + } + + decoder := json.NewDecoder(file) + err = decoder.Decode(&config) + + if err != nil { + log.Fatal(err) + } + + connectionString := config.User + ":" + config.Password + "@/" + config.Database + + db, err := db.NewDB(connectionString) + + bc := &util.StandardBcrypt{} + + model.NewModel(db, bc, config) + auth.NewAuthService(db, bc) + + app, err := api.Init() + + if err != nil { + log.Fatal(err) + } + + log.Fatal(http.ListenAndServeTLS(":"+strconv.Itoa(config.Port), config.CertFile, config.KeyFile, app.MakeHandler())) +} diff --git a/core/util/bcrypt.go b/core/util/bcrypt.go new file mode 100644 index 0000000..b566a18 --- /dev/null +++ b/core/util/bcrypt.go @@ -0,0 +1,26 @@ +package util + +import ( + "golang.org/x/crypto/bcrypt" +) + +type Bcrypt interface { + GenerateFromPassword([]byte, int) ([]byte, error) + CompareHashAndPassword([]byte, []byte) error + GetDefaultCost() int +} + +type StandardBcrypt struct { +} + +func (bc *StandardBcrypt) GenerateFromPassword(password []byte, cost int) ([]byte, error) { + return bcrypt.GenerateFromPassword(password, cost) +} + +func (bc *StandardBcrypt) CompareHashAndPassword(hashedPassword, password []byte) error { + return bcrypt.CompareHashAndPassword(hashedPassword, password) +} + +func (bc *StandardBcrypt) GetDefaultCost() int { + return bcrypt.DefaultCost +} diff --git a/core/util/util.go b/core/util/util.go new file mode 100644 index 0000000..7ea982e --- /dev/null +++ b/core/util/util.go @@ -0,0 +1,46 @@ +package util + +import ( + "crypto/rand" + "encoding/hex" + "time" +) + +func Round64(input float64) int64 { + if input < 0 { + return int64(input - 0.5) + } + return int64(input + 0.5) +} + +func TimeToMs(date time.Time) int64 { + return date.UnixNano() / 1000000 +} + +func MsToTime(ms int64) time.Time { + return time.Unix(0, ms*1000000) +} + +func NewGuid() (string, error) { + byteArray := make([]byte, 16) + + _, err := rand.Read(byteArray) + + if err != nil { + return "", err + } + + return hex.EncodeToString(byteArray), nil +} + +func NewInviteId() (string, error) { + byteArray := make([]byte, 4) + + _, err := rand.Read(byteArray) + + if err != nil { + return "", err + } + + return hex.EncodeToString(byteArray), nil +} diff --git a/core/ws/ws.go b/core/ws/ws.go new file mode 100644 index 0000000..34a31e8 --- /dev/null +++ b/core/ws/ws.go @@ -0,0 +1,341 @@ +package ws + +import ( + "encoding/json" + "errors" + "github.com/Masterminds/semver" + "github.com/ant0ine/go-json-rest/rest" + "github.com/gorilla/websocket" + "github.com/mitchellh/mapstructure" + "github.com/openaccounting/oa-server/core/auth" + 
"github.com/openaccounting/oa-server/core/model/types" + "log" + "net/http" + "sync" +) + +const version = "1.0.0" + +//var upgrader = websocket.Upgrader{} // use default options +var txSubscriptions = make(map[string][]*websocket.Conn) +var accountSubscriptions = make(map[string][]*websocket.Conn) +var priceSubscriptions = make(map[string][]*websocket.Conn) +var userMap = make(map[*websocket.Conn]*types.User) +var sequenceNumbers = make(map[*websocket.Conn]int) +var locks = make(map[*websocket.Conn]*sync.Mutex) + +type Message struct { + Version string `json:"version"` + SequenceNumber int `json:"sequenceNumber"` + Type string `json:"type"` + Action string `json:"action"` + Data interface{} `json:"data"` +} + +func Handler(w rest.ResponseWriter, r *rest.Request) { + c, err := websocket.Upgrade(w.(http.ResponseWriter), r.Request, nil, 0, 0) + if err != nil { + log.Print("upgrade:", err) + return + } + + sequenceNumbers[c] = -1 + locks[c] = &sync.Mutex{} + + defer c.Close() + for { + mt, messageData, err := c.ReadMessage() + if err != nil { + log.Println("readerr:", err) + // remove connection from maps + unsubscribeAll(c) + break + } + + if mt != websocket.TextMessage { + log.Println("Unsupported message type", mt) + continue + } + + message := Message{} + + err = json.Unmarshal(messageData, &message) + + if err != nil { + log.Println("Could not parse message:", string(messageData)) + continue + } + + log.Printf("recv: %s", message) + + // check version + err = checkVersion(message.Version) + + if err != nil { + log.Println(err.Error()) + writeMessage(c, websocket.CloseMessage, websocket.FormatCloseMessage(4001, err.Error())) + break + } + + // make sure they are authenticated + if userMap[c] == nil { + log.Println("checking message for authentication") + err = authenticate(message, c) + if err != nil { + log.Println("Authentication error " + err.Error()) + writeMessage(c, websocket.CloseMessage, websocket.FormatCloseMessage(4000, err.Error())) + break + } + continue + } + + err = processMessage(message, c) + + if err != nil { + log.Println(err) + continue + } + } +} + +func getKey(userId string, orgId string) string { + return userId + "-" + orgId +} + +func processMessage(message Message, conn *websocket.Conn) error { + var dataString string + err := mapstructure.Decode(message.Data, &dataString) + + if err != nil { + return err + } + + key := getKey(userMap[conn].Id, dataString) + + log.Println(message.Action, message.Type, dataString) + + switch message.Action { + case "subscribe": + switch message.Type { + case "transaction": + subscribe(conn, key, txSubscriptions) + case "account": + subscribe(conn, key, accountSubscriptions) + case "price": + subscribe(conn, key, priceSubscriptions) + default: + return errors.New("Unhandled message type: " + message.Type) + } + case "unsubscribe": + switch message.Type { + case "transaction": + unsubscribe(conn, key, txSubscriptions) + case "account": + unsubscribe(conn, key, accountSubscriptions) + case "price": + unsubscribe(conn, key, priceSubscriptions) + default: + return errors.New("Unhandled message type: " + message.Type) + } + case "ping": + sequenceNumbers[conn]++ + response := Message{version, sequenceNumbers[conn], "pong", "pong", nil} + responseData, err := json.Marshal(response) + + if err != nil { + return err + } + + err = writeMessage(conn, websocket.TextMessage, responseData) + + if err != nil { + unsubscribeAll(conn) + return err + } + } + + return nil +} + +func subscribe(conn *websocket.Conn, key string, clientMap 
map[string][]*websocket.Conn) { + conns := clientMap[key] + alreadySubscribed := false + + for _, c := range conns { + if conn == c { + alreadySubscribed = true + } + } + + if alreadySubscribed == false { + clientMap[key] = append(clientMap[key], conn) + } +} + +func unsubscribe(conn *websocket.Conn, key string, clientMap map[string][]*websocket.Conn) { + newConns := clientMap[key][:0] + + for _, c := range clientMap[key] { + if conn != c { + newConns = append(newConns, c) + } + } +} + +func unsubscribeAll(conn *websocket.Conn) { + for key, conns := range txSubscriptions { + newConns := conns[:0] + for _, c := range conns { + if conn != c { + newConns = append(newConns, c) + } + } + txSubscriptions[key] = newConns + } + + for key, conns := range accountSubscriptions { + newConns := conns[:0] + for _, c := range conns { + if conn != c { + newConns = append(newConns, c) + } + } + accountSubscriptions[key] = newConns + } + + for key, conns := range priceSubscriptions { + newConns := conns[:0] + for _, c := range conns { + if conn != c { + newConns = append(newConns, c) + } + } + priceSubscriptions[key] = newConns + } + + delete(userMap, conn) + delete(sequenceNumbers, conn) + delete(locks, conn) +} + +func PushTransaction(transaction *types.Transaction, userIds []string, action string) { + log.Println(txSubscriptions) + + message := Message{version, -1, "transaction", action, transaction} + + for _, userId := range userIds { + key := getKey(userId, transaction.OrgId) + for _, conn := range txSubscriptions[key] { + sequenceNumbers[conn]++ + message.SequenceNumber = sequenceNumbers[conn] + messageData, err := json.Marshal(message) + + if err != nil { + log.Println("PushTransaction json error:", err) + return + } + + err = writeMessage(conn, websocket.TextMessage, messageData) + + if err != nil { + log.Println("Cannot PushTransaction to client:", err) + unsubscribeAll(conn) + } + } + } +} + +func PushAccount(account *types.Account, userIds []string, action string) { + message := Message{version, -1, "account", action, account} + + for _, userId := range userIds { + key := getKey(userId, account.OrgId) + for _, conn := range accountSubscriptions[key] { + sequenceNumbers[conn]++ + message.SequenceNumber = sequenceNumbers[conn] + messageData, err := json.Marshal(message) + + if err != nil { + log.Println("PushAccount error:", err) + return + } + err = writeMessage(conn, websocket.TextMessage, messageData) + + if err != nil { + log.Println("Cannot PushAccount to client:", err) + unsubscribeAll(conn) + } + } + } +} + +func PushPrice(price *types.Price, userIds []string, action string) { + message := Message{version, -1, "price", action, price} + + for _, userId := range userIds { + key := getKey(userId, price.OrgId) + for _, conn := range priceSubscriptions[key] { + sequenceNumbers[conn]++ + message.SequenceNumber = sequenceNumbers[conn] + messageData, err := json.Marshal(message) + + if err != nil { + log.Println("PushPrice error:", err) + return + } + + err = writeMessage(conn, websocket.TextMessage, messageData) + + if err != nil { + log.Println("Cannot PushPrice to client:", err) + unsubscribeAll(conn) + } + } + } +} + +func authenticate(message Message, conn *websocket.Conn) error { + var id string + err := mapstructure.Decode(message.Data, &id) + + if err != nil { + return err + } + + if message.Action != "authenticate" { + return errors.New("Authentication required") + } + + user, err := auth.Instance.Authenticate(id, "") + if err != nil { + return err + } + + userMap[conn] = user + + return 
nil +} + +func checkVersion(clientVersion string) error { + constraint, err := semver.NewConstraint(clientVersion) + + if err != nil { + return errors.New("Invalid version") + } + + serverVersion, _ := semver.NewVersion(version) + + versionMatch := constraint.Check(serverVersion) + + if versionMatch != true { + return errors.New("Invalid version") + } + + return nil +} + +func writeMessage(conn *websocket.Conn, messageType int, data []byte) error { + locks[conn].Lock() + defer locks[conn].Unlock() + return conn.WriteMessage(messageType, data) +} diff --git a/indexes.sql b/indexes.sql new file mode 100644 index 0000000..7d93181 --- /dev/null +++ b/indexes.sql @@ -0,0 +1,5 @@ +CREATE INDEX account_orgId_index ON account (orgId); +CREATE INDEX split_accountId_index ON split (accountId); +CREATE INDEX split_transactionId_index ON split (transactionId); +CREATE INDEX split_date_index ON split (date); +CREATE INDEX split_updated_index ON split (updated); \ No newline at end of file diff --git a/runTests b/runTests new file mode 100755 index 0000000..5162c03 --- /dev/null +++ b/runTests @@ -0,0 +1,4 @@ +#!/bin/sh + +go test ./core/model +go test ./core/auth diff --git a/schema.sql b/schema.sql new file mode 100644 index 0000000..64ccb41 --- /dev/null +++ b/schema.sql @@ -0,0 +1,33 @@ +DROP DATABASE IF EXISTS `openaccounting`; + +CREATE DATABASE openaccounting CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; + +use openaccounting; + +CREATE TABLE org (id BINARY(16) NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, name VARCHAR(100) NOT NULL, currency VARCHAR(10) NOT NULL, `precision` INT NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE user (id BINARY(16) NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, firstName VARCHAR(50) NOT NULL, lastName VARCHAR(50) NOT NULL, email VARCHAR(100) NOT NULL, passwordHash VARCHAR(100) NOT NULL, agreeToTerms BOOLEAN NOT NULL, passwordReset VARCHAR(32) NOT NULL, emailVerified BOOLEAN NOT NULL, emailVerifyCode VARCHAR(32) NOT NULL, UNIQUE(email), PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE userorg (id INT UNSIGNED NOT NULL AUTO_INCREMENT, userId BINARY(16) NOT NULL, orgId BINARY(16) NOT NULL, admin BOOLEAN NOT NULL DEFAULT false, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE token (id BINARY(16) NOT NULL, name VARCHAR(100), userOrgId INT UNSIGNED NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE account (id BINARY(16) NOT NULL, orgId BINARY(16) NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, name VARCHAR(100) NOT NULL, parent BINARY(16) NOT NULL, currency VARCHAR(10) NOT NULL, `precision` INT NOT NULL, debitBalance BOOLEAN NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE transaction (id BINARY(16) NOT NULL, orgId BINARY(16) NOT NULL, userId BINARY(16) NOT NULL, date BIGINT UNSIGNED NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, description VARCHAR(300) NOT NULL, data TEXT NOT NULL, deleted BOOLEAN NOT NULL DEFAULT false, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE split (id INT UNSIGNED NOT NULL AUTO_INCREMENT, transactionId BINARY(16) NOT NULL, accountId BINARY(16) NOT NULL, date BIGINT UNSIGNED NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, amount BIGINT NOT NULL, nativeAmount BIGINT NOT NULL, deleted BOOLEAN NOT NULL DEFAULT false, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE balance (id INT UNSIGNED NOT NULL AUTO_INCREMENT, date BIGINT UNSIGNED NOT NULL, accountId 
BINARY(16) NOT NULL, amount BIGINT NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE permission (id BINARY(16) NOT NULL, userId BINARY(16), tokenId BINARY(16), orgId BINARY(16) NOT NULL, accountId BINARY(16) NOT NULL, type INT UNSIGNED NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE price (id BINARY(16) NOT NULL, orgId BINARY(16) NOT NULL, currency VARCHAR(10) NOT NULL, date BIGINT UNSIGNED NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, price DOUBLE UNSIGNED NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE session (id BINARY(16) NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, userId BINARY(16) NOT NULL, `terminated` BIGINT UNSIGNED, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE apikey (id BINARY(16) NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, userId BINARY(16) NOT NULL, label VARCHAR(300) NOT NULL, deleted BIGINT UNSIGNED, PRIMARY KEY(id)) ENGINE=InnoDB; + +CREATE TABLE invite (id VARCHAR(32) NOT NULL, orgId BINARY(16) NOT NULL, inserted BIGINT UNSIGNED NOT NULL, updated BIGINT UNSIGNED NOT NULL, email VARCHAR(100) NOT NULL, accepted BOOLEAN NOT NULL, PRIMARY KEY(id)) ENGINE=InnoDB; + +GRANT ALL ON openaccounting.* TO 'openaccounting'@'localhost' IDENTIFIED BY 'openaccounting'; \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md new file mode 100644 index 0000000..b888e20 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -0,0 +1,86 @@ +# 1.4.2 (2018-04-10) + +## Changed +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +## Fixed +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +# 1.4.1 (2018-04-02) + +## Fixed +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +# 1.4.0 (2017-10-04) + +## Changed +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +# 1.3.1 (2017-07-10) + +## Fixed +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +# 1.3.0 (2017-05-02) + +## Added +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +## Fixed +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +## Changed +- #55: The godoc icon moved from png to svg + +# 1.2.3 (2017-04-03) + +## Fixed +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +# Release 1.2.2 (2016-12-13) + +## Fixed +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +# Release 1.2.1 (2016-11-28) + +## Fixed +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +# Release 1.2.0 (2016-11-04) + +## Added +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +## Fixed +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. 
+ +# Release 1.1.1 (2016-06-30) + +## Changed +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +# Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +# Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +# Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt new file mode 100644 index 0000000..0da4aea --- /dev/null +++ b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -0,0 +1,20 @@ +The Masterminds +Copyright (C) 2014-2015, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile new file mode 100644 index 0000000..a7a1b4e --- /dev/null +++ b/vendor/github.com/Masterminds/semver/Makefile @@ -0,0 +1,36 @@ +.PHONY: setup +setup: + go get -u gopkg.in/alecthomas/gometalinter.v1 + gometalinter.v1 --install + +.PHONY: test +test: validate lint + @echo "==> Running tests" + go test -v + +.PHONY: validate +validate: + @echo "==> Running static validations" + @gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1 + +.PHONY: lint +lint: + @echo "==> Running linters" + @gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... || : diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md new file mode 100644 index 0000000..af845f1 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/README.md @@ -0,0 +1,186 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. 
Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + +## Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + +```go + v, err := semver.NewVersion("1.2.3-beta.1+build345") +``` + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the [documentation](https://godoc.org/github.com/Masterminds/semver). + +## Sorting Semantic Versions + +A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/) +package from the standard library. For example, + +```go + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) +``` + +## Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +## Working With Pre-release Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precidence, pre-releases come before their associated releases. 
In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification pre-releases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons without a pre-release comparator will skip pre-release versions. +For example, `>=1.2.3` will skip pre-releases when looking at a list of releases +while `>=1.2.3-0` will evaluate and find pre-releases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +## Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` + +## Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the pack level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +## Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +## Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes. This is useful +when comparisons of API versions as a major change is API breaking. For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` + +# Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, _ := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint. 
+ a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +``` + +# Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml new file mode 100644 index 0000000..b2778df --- /dev/null +++ b/vendor/github.com/Masterminds/semver/appveyor.yml @@ -0,0 +1,44 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\semver +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +install: + - go version + - go env + - go get -u gopkg.in/alecthomas/gometalinter.v1 + - set PATH=%PATH%;%GOPATH%\bin + - gometalinter.v1.exe --install + +build_script: + - go install -v ./... + +test_script: + - "gometalinter.v1 \ + --disable-all \ + --enable deadcode \ + --severity deadcode:error \ + --enable gofmt \ + --enable gosimple \ + --enable ineffassign \ + --enable misspell \ + --enable vet \ + --tests \ + --vendor \ + --deadline 60s \ + ./... || exit_code=1" + - "gometalinter.v1 \ + --disable-all \ + --enable golint \ + --vendor \ + --deadline 60s \ + ./... || :" + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go new file mode 100644 index 0000000..a782358 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go new file mode 100644 index 0000000..2f3d779 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/constraints.go @@ -0,0 +1,406 @@ +package semver + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. 
+ c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + cs := strings.Split(v, ",") + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if !c.check(v) { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if !c.check(v) { + em := fmt.Errorf(c.msg, v, c.orig) + e = append(e, em) + joy = false + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +var constraintOps map[string]cfunc +var constraintMsg map[string]string +var constraintRegex *regexp.Regexp + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + constraintMsg = map[string]string{ + "": "%s is not equal to %s", + "=": "%s is not equal to %s", + "!=": "%s is equal to %s", + ">": "%s is less than or equal to %s", + "<": "%s is greater than or equal to %s", + ">=": "%s is less than %s", + "=>": "%s is less than %s", + "<=": "%s is greater than %s", + "=<": "%s is greater than %s", + "~": "%s does not have same major and minor version as %s", + "~>": "%s does not have same major and minor version as %s", + "^": "%s does not have same major version as %s", + } + + ops := make([]string, 0, len(constraintOps)) + for k := range constraintOps { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) +} + +// An individual constraint +type constraint struct { + // The callback function for the restraint. It performs the logic for + // the constraint. + function cfunc + + msg string + + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. 
+ con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) bool { + return c.function(v, c) +} + +type cfunc func(v *Version, c *constraint) bool + +func parseConstraint(c string) (*constraint, error) { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + ver := m[2] + orig := ver + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + function: constraintOps[m[1]], + msg: constraintMsg[m[1]], + con: con, + orig: orig, + minorDirty: minorDirty, + patchDirty: patchDirty, + dirty: dirty, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) bool { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.con.Major() != v.Major() { + return true + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true + } else if c.minorDirty { + return false + } + + return false + } + + return !v.Equal(c.con) +} + +func constraintGreaterThan(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) == 1 +} + +func constraintLessThan(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) < 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +func constraintGreaterThanEqual(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) >= 0 +} + +func constraintLessThanEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) <= 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true + } + + if v.Major() != c.con.Major() { + return false + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.dirty { + c.msg = constraintMsg["~"] + return constraintTilde(v, c) + } + + return v.Equal(c.con) +} + +// ^* --> (any) +// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 +// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 +// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 +// ^1.2.3 --> >=1.2.3, <2.0.0 +// ^1.2.0 --> >=1.2.0, <2.0.0 +func constraintCaret(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + if v.Major() != c.con.Major() { + return false + } + + return true +} + +var constraintRangeRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go new file mode 100644 index 0000000..6a6c24c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -0,0 +1,115 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. 
+ +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the documentation at https://godoc.org/github.com/Masterminds/semver. + +Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + t.Errorf("Error parsing version: %s", err) + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +Checking Version Constraints + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parseable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma separated and comparisons. These are then separated by || separated or +comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. + +The basic comparisons are: + + * `=`: equal (aliased to no operator) + * `!=`: not equal + * `>`: greater than + * `<`: less than + * `>=`: greater than or equal to + * `<=`: less than or equal to + +Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphens ranges. +These look like: + + * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` + +Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as a wildcard character. This works +for all comparison operators. When used on the `=` operator it falls +back to the pack level comparison (see tilde below). For example, + + * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` + * `>= 1.2.x` is equivalent to `>= 1.2.0` + * `<= 2.x` is equivalent to `<= 3` + * `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` + * `~1` is equivalent to `>= 1, < 2` + * `~2.3` is equivalent to `>= 2.3, < 2.4` + * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` + * `~1.x` is equivalent to `>= 1, < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes. This is useful +when comparisons of API versions as a major change is API breaking. 
For example, + + * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + * `^2.3` is equivalent to `>= 2.3, < 3` + * `^2.x` is equivalent to `>= 2.0.0, < 3` +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go new file mode 100644 index 0000000..ab3a368 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version.go @@ -0,0 +1,421 @@ +package semver + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp +var validPrereleaseRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned a version is found to be invalid when + // being parsed. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// SemVerRegex is the regular expression used to parse a semantic version. +const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// ValidPrerelease is the regular expression which validates +// both prerelease and metadata values. +const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch int64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + SemVerRegex + "$") + validPrereleaseRegex = regexp.MustCompile(ValidPrerelease) +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var temp int64 + temp, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.major = temp + + if m[2] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.minor = temp + } else { + sv.minor = 0 + } + + if m[3] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.patch = temp + } else { + sv.patch = 0 + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. 
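+//
+// For example (an illustrative sketch, not part of the upstream file):
+//
+//	v, _ := semver.NewVersion("v1.2.3-beta.1+build345")
+//	v.String()   // "1.2.3-beta.1+build345" -- leading v dropped
+//	v.Original() // "v1.2.3-beta.1+build345" -- original input preserved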
+func (v *Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v *Version) Major() int64 { + return v.major +} + +// Minor returns the minor version. +func (v *Version) Minor() int64 { + return v.minor +} + +// Patch returns the patch version. +func (v *Version) Patch() int64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v *Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v *Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v *Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps curent patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hypen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) { + return vNext, ErrInvalidPrerelease + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. 
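+//
+// An illustrative sketch (not part of the upstream file):
+//
+//	v := semver.MustParse("1.2.3")
+//	v2, err := v.SetMetadata("build.42")
+//	// err == nil, v2.String() == "1.2.3+build.42"; v itself is unchanged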
+func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) { + return vNext, ErrInvalidMetadata + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + temp = nil + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v *Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func compareSegment(v, o int64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prelease versions by their part. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through. + slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the lentgh of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. 
+ return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. + + oi, n1 := strconv.ParseInt(o, 10, 64) + si, n2 := strconv.ParseInt(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} diff --git a/vendor/github.com/ant0ine/go-json-rest/LICENSE b/vendor/github.com/ant0ine/go-json-rest/LICENSE new file mode 100644 index 0000000..7800c4b --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2013-2016 Antoine Imbert + +The MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/access_log_apache.go b/vendor/github.com/ant0ine/go-json-rest/rest/access_log_apache.go new file mode 100644 index 0000000..d82894a --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/access_log_apache.go @@ -0,0 +1,236 @@ +package rest + +import ( + "bytes" + "fmt" + "log" + "net" + "os" + "strings" + "text/template" + "time" +) + +// TODO Future improvements: +// * support %{strftime}t ? +// * support %{
}o to print headers + +// AccessLogFormat defines the format of the access log record. +// This implementation is a subset of Apache mod_log_config. +// (See http://httpd.apache.org/docs/2.0/mod/mod_log_config.html) +// +// %b content length in bytes, - if 0 +// %B content length in bytes +// %D response elapsed time in microseconds +// %h remote address +// %H server protocol +// %l identd logname, not supported, - +// %m http method +// %P process id +// %q query string +// %r first line of the request +// %s status code +// %S status code preceeded by a terminal color +// %t time of the request +// %T response elapsed time in seconds, 3 decimals +// %u remote user, - if missing +// %{User-Agent}i user agent, - if missing +// %{Referer}i referer, - is missing +// +// Some predefined formats are provided as contants. +type AccessLogFormat string + +const ( + // CommonLogFormat is the Common Log Format (CLF). + CommonLogFormat = "%h %l %u %t \"%r\" %s %b" + + // CombinedLogFormat is the NCSA extended/combined log format. + CombinedLogFormat = "%h %l %u %t \"%r\" %s %b \"%{Referer}i\" \"%{User-Agent}i\"" + + // DefaultLogFormat is the default format, colored output and response time, convenient for development. + DefaultLogFormat = "%t %S\033[0m \033[36;1m%Dμs\033[0m \"%r\" \033[1;30m%u \"%{User-Agent}i\"\033[0m" +) + +// AccessLogApacheMiddleware produces the access log following a format inspired by Apache +// mod_log_config. It depends on TimerMiddleware and RecorderMiddleware that should be in the wrapped +// middlewares. It also uses request.Env["REMOTE_USER"].(string) set by the auth middlewares. +type AccessLogApacheMiddleware struct { + + // Logger points to the logger object used by this middleware, it defaults to + // log.New(os.Stderr, "", 0). + Logger *log.Logger + + // Format defines the format of the access log record. See AccessLogFormat for the details. + // It defaults to DefaultLogFormat. + Format AccessLogFormat + + textTemplate *template.Template +} + +// MiddlewareFunc makes AccessLogApacheMiddleware implement the Middleware interface. 
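+//
+// A hedged usage sketch (not part of the upstream file). The access log middleware
+// must be installed before TimerMiddleware and RecorderMiddleware so that it wraps
+// them, as in DefaultProdStack:
+//
+//	api := rest.NewApi()
+//	api.Use(&rest.AccessLogApacheMiddleware{Format: rest.CombinedLogFormat})
+//	api.Use(rest.DefaultCommonStack...)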
+func (mw *AccessLogApacheMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + // set the default Logger + if mw.Logger == nil { + mw.Logger = log.New(os.Stderr, "", 0) + } + + // set default format + if mw.Format == "" { + mw.Format = DefaultLogFormat + } + + mw.convertFormat() + + return func(w ResponseWriter, r *Request) { + + // call the handler + h(w, r) + + util := &accessLogUtil{w, r} + + mw.Logger.Print(mw.executeTextTemplate(util)) + } +} + +var apacheAdapter = strings.NewReplacer( + "%b", "{{.BytesWritten | dashIf0}}", + "%B", "{{.BytesWritten}}", + "%D", "{{.ResponseTime | microseconds}}", + "%h", "{{.ApacheRemoteAddr}}", + "%H", "{{.R.Proto}}", + "%l", "-", + "%m", "{{.R.Method}}", + "%P", "{{.Pid}}", + "%q", "{{.ApacheQueryString}}", + "%r", "{{.R.Method}} {{.R.URL.RequestURI}} {{.R.Proto}}", + "%s", "{{.StatusCode}}", + "%S", "\033[{{.StatusCode | statusCodeColor}}m{{.StatusCode}}", + "%t", "{{if .StartTime}}{{.StartTime.Format \"02/Jan/2006:15:04:05 -0700\"}}{{end}}", + "%T", "{{if .ResponseTime}}{{.ResponseTime.Seconds | printf \"%.3f\"}}{{end}}", + "%u", "{{.RemoteUser | dashIfEmptyStr}}", + "%{User-Agent}i", "{{.R.UserAgent | dashIfEmptyStr}}", + "%{Referer}i", "{{.R.Referer | dashIfEmptyStr}}", +) + +// Convert the Apache access log format into a text/template +func (mw *AccessLogApacheMiddleware) convertFormat() { + + tmplText := apacheAdapter.Replace(string(mw.Format)) + + funcMap := template.FuncMap{ + "dashIfEmptyStr": func(value string) string { + if value == "" { + return "-" + } + return value + }, + "dashIf0": func(value int64) string { + if value == 0 { + return "-" + } + return fmt.Sprintf("%d", value) + }, + "microseconds": func(dur *time.Duration) string { + if dur != nil { + return fmt.Sprintf("%d", dur.Nanoseconds()/1000) + } + return "" + }, + "statusCodeColor": func(statusCode int) string { + if statusCode >= 400 && statusCode < 500 { + return "1;33" + } else if statusCode >= 500 { + return "0;31" + } + return "0;32" + }, + } + + var err error + mw.textTemplate, err = template.New("accessLog").Funcs(funcMap).Parse(tmplText) + if err != nil { + panic(err) + } +} + +// Execute the text template with the data derived from the request, and return a string. +func (mw *AccessLogApacheMiddleware) executeTextTemplate(util *accessLogUtil) string { + buf := bytes.NewBufferString("") + err := mw.textTemplate.Execute(buf, util) + if err != nil { + panic(err) + } + return buf.String() +} + +// accessLogUtil provides a collection of utility functions that devrive data from the Request object. +// This object is used to provide data to the Apache Style template and the the JSON log record. +type accessLogUtil struct { + W ResponseWriter + R *Request +} + +// As stored by the auth middlewares. +func (u *accessLogUtil) RemoteUser() string { + if u.R.Env["REMOTE_USER"] != nil { + return u.R.Env["REMOTE_USER"].(string) + } + return "" +} + +// If qs exists then return it with a leadin "?", apache log style. +func (u *accessLogUtil) ApacheQueryString() string { + if u.R.URL.RawQuery != "" { + return "?" + u.R.URL.RawQuery + } + return "" +} + +// When the request entered the timer middleware. +func (u *accessLogUtil) StartTime() *time.Time { + if u.R.Env["START_TIME"] != nil { + return u.R.Env["START_TIME"].(*time.Time) + } + return nil +} + +// If remoteAddr is set then return is without the port number, apache log style. 
+func (u *accessLogUtil) ApacheRemoteAddr() string { + remoteAddr := u.R.RemoteAddr + if remoteAddr != "" { + if ip, _, err := net.SplitHostPort(remoteAddr); err == nil { + return ip + } + } + return "" +} + +// As recorded by the recorder middleware. +func (u *accessLogUtil) StatusCode() int { + if u.R.Env["STATUS_CODE"] != nil { + return u.R.Env["STATUS_CODE"].(int) + } + return 0 +} + +// As mesured by the timer middleware. +func (u *accessLogUtil) ResponseTime() *time.Duration { + if u.R.Env["ELAPSED_TIME"] != nil { + return u.R.Env["ELAPSED_TIME"].(*time.Duration) + } + return nil +} + +// Process id. +func (u *accessLogUtil) Pid() int { + return os.Getpid() +} + +// As recorded by the recorder middleware. +func (u *accessLogUtil) BytesWritten() int64 { + if u.R.Env["BYTES_WRITTEN"] != nil { + return u.R.Env["BYTES_WRITTEN"].(int64) + } + return 0 +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/access_log_json.go b/vendor/github.com/ant0ine/go-json-rest/rest/access_log_json.go new file mode 100644 index 0000000..a6bc175 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/access_log_json.go @@ -0,0 +1,88 @@ +package rest + +import ( + "encoding/json" + "log" + "os" + "time" +) + +// AccessLogJsonMiddleware produces the access log with records written as JSON. This middleware +// depends on TimerMiddleware and RecorderMiddleware that must be in the wrapped middlewares. It +// also uses request.Env["REMOTE_USER"].(string) set by the auth middlewares. +type AccessLogJsonMiddleware struct { + + // Logger points to the logger object used by this middleware, it defaults to + // log.New(os.Stderr, "", 0). + Logger *log.Logger +} + +// MiddlewareFunc makes AccessLogJsonMiddleware implement the Middleware interface. +func (mw *AccessLogJsonMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + // set the default Logger + if mw.Logger == nil { + mw.Logger = log.New(os.Stderr, "", 0) + } + + return func(w ResponseWriter, r *Request) { + + // call the handler + h(w, r) + + mw.Logger.Printf("%s", makeAccessLogJsonRecord(r).asJson()) + } +} + +// AccessLogJsonRecord is the data structure used by AccessLogJsonMiddleware to create the JSON +// records. 
(Public for documentation only, no public method uses it) +type AccessLogJsonRecord struct { + Timestamp *time.Time + StatusCode int + ResponseTime *time.Duration + HttpMethod string + RequestURI string + RemoteUser string + UserAgent string +} + +func makeAccessLogJsonRecord(r *Request) *AccessLogJsonRecord { + + var timestamp *time.Time + if r.Env["START_TIME"] != nil { + timestamp = r.Env["START_TIME"].(*time.Time) + } + + var statusCode int + if r.Env["STATUS_CODE"] != nil { + statusCode = r.Env["STATUS_CODE"].(int) + } + + var responseTime *time.Duration + if r.Env["ELAPSED_TIME"] != nil { + responseTime = r.Env["ELAPSED_TIME"].(*time.Duration) + } + + var remoteUser string + if r.Env["REMOTE_USER"] != nil { + remoteUser = r.Env["REMOTE_USER"].(string) + } + + return &AccessLogJsonRecord{ + Timestamp: timestamp, + StatusCode: statusCode, + ResponseTime: responseTime, + HttpMethod: r.Method, + RequestURI: r.URL.RequestURI(), + RemoteUser: remoteUser, + UserAgent: r.UserAgent(), + } +} + +func (r *AccessLogJsonRecord) asJson() []byte { + b, err := json.Marshal(r) + if err != nil { + panic(err) + } + return b +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/api.go b/vendor/github.com/ant0ine/go-json-rest/rest/api.go new file mode 100644 index 0000000..6295430 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/api.go @@ -0,0 +1,83 @@ +package rest + +import ( + "net/http" +) + +// Api defines a stack of Middlewares and an App. +type Api struct { + stack []Middleware + app App +} + +// NewApi makes a new Api object. The Middleware stack is empty, and the App is nil. +func NewApi() *Api { + return &Api{ + stack: []Middleware{}, + app: nil, + } +} + +// Use pushes one or multiple middlewares to the stack for middlewares +// maintained in the Api object. +func (api *Api) Use(middlewares ...Middleware) { + api.stack = append(api.stack, middlewares...) +} + +// SetApp sets the App in the Api object. +func (api *Api) SetApp(app App) { + api.app = app +} + +// MakeHandler wraps all the Middlewares of the stack and the App together, and returns an +// http.Handler ready to be used. If the Middleware stack is empty the App is used directly. If the +// App is nil, a HandlerFunc that does nothing is used instead. +func (api *Api) MakeHandler() http.Handler { + var appFunc HandlerFunc + if api.app != nil { + appFunc = api.app.AppFunc() + } else { + appFunc = func(w ResponseWriter, r *Request) {} + } + return http.HandlerFunc( + adapterFunc( + WrapMiddlewares(api.stack, appFunc), + ), + ) +} + +// Defines a stack of middlewares convenient for development. Among other things: +// console friendly logging, JSON indentation, error stack strace in the response. +var DefaultDevStack = []Middleware{ + &AccessLogApacheMiddleware{}, + &TimerMiddleware{}, + &RecorderMiddleware{}, + &PoweredByMiddleware{}, + &RecoverMiddleware{ + EnableResponseStackTrace: true, + }, + &JsonIndentMiddleware{}, + &ContentTypeCheckerMiddleware{}, +} + +// Defines a stack of middlewares convenient for production. Among other things: +// Apache CombinedLogFormat logging, gzip compression. +var DefaultProdStack = []Middleware{ + &AccessLogApacheMiddleware{ + Format: CombinedLogFormat, + }, + &TimerMiddleware{}, + &RecorderMiddleware{}, + &PoweredByMiddleware{}, + &RecoverMiddleware{}, + &GzipMiddleware{}, + &ContentTypeCheckerMiddleware{}, +} + +// Defines a stack of middlewares that should be common to most of the middleware stacks. 
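+// A hedged example (not part of the upstream file): install the common stack first,
+// then append more specific middlewares such as gzip compression:
+//
+//	api := rest.NewApi()
+//	api.Use(rest.DefaultCommonStack...)
+//	api.Use(&rest.GzipMiddleware{})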
+var DefaultCommonStack = []Middleware{ + &TimerMiddleware{}, + &RecorderMiddleware{}, + &PoweredByMiddleware{}, + &RecoverMiddleware{}, +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/auth_basic.go b/vendor/github.com/ant0ine/go-json-rest/rest/auth_basic.go new file mode 100644 index 0000000..dbf254c --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/auth_basic.go @@ -0,0 +1,100 @@ +package rest + +import ( + "encoding/base64" + "errors" + "log" + "net/http" + "strings" +) + +// AuthBasicMiddleware provides a simple AuthBasic implementation. On failure, a 401 HTTP response +//is returned. On success, the wrapped middleware is called, and the userId is made available as +// request.Env["REMOTE_USER"].(string) +type AuthBasicMiddleware struct { + + // Realm name to display to the user. Required. + Realm string + + // Callback function that should perform the authentication of the user based on userId and + // password. Must return true on success, false on failure. Required. + Authenticator func(userId string, password string) bool + + // Callback function that should perform the authorization of the authenticated user. Called + // only after an authentication success. Must return true on success, false on failure. + // Optional, default to success. + Authorizator func(userId string, request *Request) bool +} + +// MiddlewareFunc makes AuthBasicMiddleware implement the Middleware interface. +func (mw *AuthBasicMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + + if mw.Realm == "" { + log.Fatal("Realm is required") + } + + if mw.Authenticator == nil { + log.Fatal("Authenticator is required") + } + + if mw.Authorizator == nil { + mw.Authorizator = func(userId string, request *Request) bool { + return true + } + } + + return func(writer ResponseWriter, request *Request) { + + authHeader := request.Header.Get("Authorization") + if authHeader == "" { + mw.unauthorized(writer) + return + } + + providedUserId, providedPassword, err := mw.decodeBasicAuthHeader(authHeader) + + if err != nil { + Error(writer, "Invalid authentication", http.StatusBadRequest) + return + } + + if !mw.Authenticator(providedUserId, providedPassword) { + mw.unauthorized(writer) + return + } + + if !mw.Authorizator(providedUserId, request) { + mw.unauthorized(writer) + return + } + + request.Env["REMOTE_USER"] = providedUserId + + handler(writer, request) + } +} + +func (mw *AuthBasicMiddleware) unauthorized(writer ResponseWriter) { + writer.Header().Set("WWW-Authenticate", "Basic realm="+mw.Realm) + Error(writer, "Not Authorized", http.StatusUnauthorized) +} + +func (mw *AuthBasicMiddleware) decodeBasicAuthHeader(header string) (user string, password string, err error) { + + parts := strings.SplitN(header, " ", 2) + if !(len(parts) == 2 && parts[0] == "Basic") { + return "", "", errors.New("Invalid authentication") + } + + decoded, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return "", "", errors.New("Invalid base64") + } + + creds := strings.SplitN(string(decoded), ":", 2) + if len(creds) != 2 { + return "", "", errors.New("Invalid authentication") + } + + return creds[0], creds[1], nil +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/content_type_checker.go b/vendor/github.com/ant0ine/go-json-rest/rest/content_type_checker.go new file mode 100644 index 0000000..1d87877 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/content_type_checker.go @@ -0,0 +1,40 @@ +package rest + +import ( + "mime" + "net/http" + "strings" +) + +// 
ContentTypeCheckerMiddleware verifies the request Content-Type header and returns a +// StatusUnsupportedMediaType (415) HTTP error response if it's incorrect. The expected +// Content-Type is 'application/json' if the content is non-null. Note: If a charset parameter +// exists, it MUST be UTF-8. +type ContentTypeCheckerMiddleware struct{} + +// MiddlewareFunc makes ContentTypeCheckerMiddleware implement the Middleware interface. +func (mw *ContentTypeCheckerMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + + return func(w ResponseWriter, r *Request) { + + mediatype, params, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + charset, ok := params["charset"] + if !ok { + charset = "UTF-8" + } + + // per net/http doc, means that the length is known and non-null + if r.ContentLength > 0 && + !(mediatype == "application/json" && strings.ToUpper(charset) == "UTF-8") { + + Error(w, + "Bad Content-Type or charset, expected 'application/json'", + http.StatusUnsupportedMediaType, + ) + return + } + + // call the wrapped handler + handler(w, r) + } +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/cors.go b/vendor/github.com/ant0ine/go-json-rest/rest/cors.go new file mode 100644 index 0000000..5b00543 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/cors.go @@ -0,0 +1,135 @@ +package rest + +import ( + "net/http" + "strconv" + "strings" +) + +// Possible improvements: +// If AllowedMethods["*"] then Access-Control-Allow-Methods is set to the requested methods +// If AllowedHeaderss["*"] then Access-Control-Allow-Headers is set to the requested headers +// Put some presets in AllowedHeaders +// Put some presets in AccessControlExposeHeaders + +// CorsMiddleware provides a configurable CORS implementation. +type CorsMiddleware struct { + allowedMethods map[string]bool + allowedMethodsCsv string + allowedHeaders map[string]bool + allowedHeadersCsv string + + // Reject non CORS requests if true. See CorsInfo.IsCors. + RejectNonCorsRequests bool + + // Function excecuted for every CORS requests to validate the Origin. (Required) + // Must return true if valid, false if invalid. + // For instance: simple equality, regexp, DB lookup, ... + OriginValidator func(origin string, request *Request) bool + + // List of allowed HTTP methods. Note that the comparison will be made in + // uppercase to avoid common mistakes. And that the + // Access-Control-Allow-Methods response header also uses uppercase. + // (see CorsInfo.AccessControlRequestMethod) + AllowedMethods []string + + // List of allowed HTTP Headers. Note that the comparison will be made with + // noarmalized names (http.CanonicalHeaderKey). And that the response header + // also uses normalized names. + // (see CorsInfo.AccessControlRequestHeaders) + AllowedHeaders []string + + // List of headers used to set the Access-Control-Expose-Headers header. + AccessControlExposeHeaders []string + + // User to se the Access-Control-Allow-Credentials response header. + AccessControlAllowCredentials bool + + // Used to set the Access-Control-Max-Age response header, in seconds. + AccessControlMaxAge int +} + +// MiddlewareFunc makes CorsMiddleware implement the Middleware interface. 
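+//
+// An illustrative configuration sketch (not part of the upstream file; the origin,
+// methods and headers below are placeholder values):
+//
+//	api.Use(&rest.CorsMiddleware{
+//		OriginValidator: func(origin string, request *rest.Request) bool {
+//			return origin == "https://domain.com"
+//		},
+//		AllowedMethods:                []string{"GET", "POST", "PUT", "DELETE"},
+//		AllowedHeaders:                []string{"Accept", "Content-Type", "Authorization"},
+//		AccessControlAllowCredentials: true,
+//		AccessControlMaxAge:           3600,
+//	})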
+func (mw *CorsMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + + // precompute as much as possible at init time + + mw.allowedMethods = map[string]bool{} + normedMethods := []string{} + for _, allowedMethod := range mw.AllowedMethods { + normed := strings.ToUpper(allowedMethod) + mw.allowedMethods[normed] = true + normedMethods = append(normedMethods, normed) + } + mw.allowedMethodsCsv = strings.Join(normedMethods, ",") + + mw.allowedHeaders = map[string]bool{} + normedHeaders := []string{} + for _, allowedHeader := range mw.AllowedHeaders { + normed := http.CanonicalHeaderKey(allowedHeader) + mw.allowedHeaders[normed] = true + normedHeaders = append(normedHeaders, normed) + } + mw.allowedHeadersCsv = strings.Join(normedHeaders, ",") + + return func(writer ResponseWriter, request *Request) { + + corsInfo := request.GetCorsInfo() + + // non CORS requests + if !corsInfo.IsCors { + if mw.RejectNonCorsRequests { + Error(writer, "Non CORS request", http.StatusForbidden) + return + } + // continue, execute the wrapped middleware + handler(writer, request) + return + } + + // Validate the Origin + if mw.OriginValidator(corsInfo.Origin, request) == false { + Error(writer, "Invalid Origin", http.StatusForbidden) + return + } + + if corsInfo.IsPreflight { + + // check the request methods + if mw.allowedMethods[corsInfo.AccessControlRequestMethod] == false { + Error(writer, "Invalid Preflight Request", http.StatusForbidden) + return + } + + // check the request headers + for _, requestedHeader := range corsInfo.AccessControlRequestHeaders { + if mw.allowedHeaders[requestedHeader] == false { + Error(writer, "Invalid Preflight Request", http.StatusForbidden) + return + } + } + + writer.Header().Set("Access-Control-Allow-Methods", mw.allowedMethodsCsv) + writer.Header().Set("Access-Control-Allow-Headers", mw.allowedHeadersCsv) + writer.Header().Set("Access-Control-Allow-Origin", corsInfo.Origin) + if mw.AccessControlAllowCredentials == true { + writer.Header().Set("Access-Control-Allow-Credentials", "true") + } + writer.Header().Set("Access-Control-Max-Age", strconv.Itoa(mw.AccessControlMaxAge)) + writer.WriteHeader(http.StatusOK) + return + } + + // Non-preflight requests + for _, exposed := range mw.AccessControlExposeHeaders { + writer.Header().Add("Access-Control-Expose-Headers", exposed) + } + writer.Header().Set("Access-Control-Allow-Origin", corsInfo.Origin) + if mw.AccessControlAllowCredentials == true { + writer.Header().Set("Access-Control-Allow-Credentials", "true") + } + // continure, execute the wrapped middleware + handler(writer, request) + return + } +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/doc.go b/vendor/github.com/ant0ine/go-json-rest/rest/doc.go new file mode 100644 index 0000000..fa6f5b2 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/doc.go @@ -0,0 +1,47 @@ +// A quick and easy way to setup a RESTful JSON API +// +// http://ant0ine.github.io/go-json-rest/ +// +// Go-Json-Rest is a thin layer on top of net/http that helps building RESTful JSON APIs easily. +// It provides fast and scalable request routing using a Trie based implementation, helpers to deal +// with JSON requests and responses, and middlewares for functionalities like CORS, Auth, Gzip, +// Status, ... 
+// +// Example: +// +// package main +// +// import ( +// "github.com/ant0ine/go-json-rest/rest" +// "log" +// "net/http" +// ) +// +// type User struct { +// Id string +// Name string +// } +// +// func GetUser(w rest.ResponseWriter, req *rest.Request) { +// user := User{ +// Id: req.PathParam("id"), +// Name: "Antoine", +// } +// w.WriteJson(&user) +// } +// +// func main() { +// api := rest.NewApi() +// api.Use(rest.DefaultDevStack...) +// router, err := rest.MakeRouter( +// rest.Get("/users/:id", GetUser), +// ) +// if err != nil { +// log.Fatal(err) +// } +// api.SetApp(router) +// log.Fatal(http.ListenAndServe(":8080", api.MakeHandler())) +// } +// +// +package rest diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/gzip.go b/vendor/github.com/ant0ine/go-json-rest/rest/gzip.go new file mode 100644 index 0000000..0fafc05 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/gzip.go @@ -0,0 +1,132 @@ +package rest + +import ( + "bufio" + "compress/gzip" + "net" + "net/http" + "strings" +) + +// GzipMiddleware is responsible for compressing the payload with gzip and setting the proper +// headers when supported by the client. It must be wrapped by TimerMiddleware for the +// compression time to be captured. And It must be wrapped by RecorderMiddleware for the +// compressed BYTES_WRITTEN to be captured. +type GzipMiddleware struct{} + +// MiddlewareFunc makes GzipMiddleware implement the Middleware interface. +func (mw *GzipMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + // gzip support enabled + canGzip := strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") + // client accepts gzip ? + writer := &gzipResponseWriter{w, false, canGzip, nil} + defer func() { + // need to close gzip writer + if writer.gzipWriter != nil { + writer.gzipWriter.Close() + } + }() + // call the handler with the wrapped writer + h(writer, r) + } +} + +// Private responseWriter intantiated by the gzip middleware. +// It encodes the payload with gzip and set the proper headers. +// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type gzipResponseWriter struct { + ResponseWriter + wroteHeader bool + canGzip bool + gzipWriter *gzip.Writer +} + +// Set the right headers for gzip encoded responses. +func (w *gzipResponseWriter) WriteHeader(code int) { + + // Always set the Vary header, even if this particular request + // is not gzipped. + w.Header().Add("Vary", "Accept-Encoding") + + if w.canGzip { + w.Header().Set("Content-Encoding", "gzip") + } + + w.ResponseWriter.WriteHeader(code) + w.wroteHeader = true +} + +// Make sure the local Write is called. +func (w *gzipResponseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + _, err = w.Write(b) + if err != nil { + return err + } + return nil +} + +// Make sure the local WriteHeader is called, and call the parent Flush. +// Provided in order to implement the http.Flusher interface. +func (w *gzipResponseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Call the parent CloseNotify. +// Provided in order to implement the http.CloseNotifier interface. 
+func (w *gzipResponseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. +func (w *gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} + +// Make sure the local WriteHeader is called, and encode the payload if necessary. +// Provided in order to implement the http.ResponseWriter interface. +func (w *gzipResponseWriter) Write(b []byte) (int, error) { + + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + writer := w.ResponseWriter.(http.ResponseWriter) + + if w.canGzip { + // Write can be called multiple times for a given response. + // (see the streaming example: + // https://github.com/ant0ine/go-json-rest-examples/tree/master/streaming) + // The gzipWriter is instantiated only once, and flushed after + // each write. + if w.gzipWriter == nil { + w.gzipWriter = gzip.NewWriter(writer) + } + count, errW := w.gzipWriter.Write(b) + errF := w.gzipWriter.Flush() + if errW != nil { + return count, errW + } + if errF != nil { + return count, errF + } + return count, nil + } + + return writer.Write(b) +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/if.go b/vendor/github.com/ant0ine/go-json-rest/rest/if.go new file mode 100644 index 0000000..daa37d1 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/if.go @@ -0,0 +1,53 @@ +package rest + +import ( + "log" +) + +// IfMiddleware evaluates at runtime a condition based on the current request, and decides to +// execute one of the other Middleware based on this boolean. +type IfMiddleware struct { + + // Runtime condition that decides of the execution of IfTrue of IfFalse. + Condition func(r *Request) bool + + // Middleware to run when the condition is true. Note that the middleware is initialized + // weather if will be used or not. (Optional, pass-through if not set) + IfTrue Middleware + + // Middleware to run when the condition is false. Note that the middleware is initialized + // weather if will be used or not. (Optional, pass-through if not set) + IfFalse Middleware +} + +// MiddlewareFunc makes TimerMiddleware implement the Middleware interface. +func (mw *IfMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + if mw.Condition == nil { + log.Fatal("IfMiddleware Condition is required") + } + + var ifTrueHandler HandlerFunc + if mw.IfTrue != nil { + ifTrueHandler = mw.IfTrue.MiddlewareFunc(h) + } else { + ifTrueHandler = h + } + + var ifFalseHandler HandlerFunc + if mw.IfFalse != nil { + ifFalseHandler = mw.IfFalse.MiddlewareFunc(h) + } else { + ifFalseHandler = h + } + + return func(w ResponseWriter, r *Request) { + + if mw.Condition(r) { + ifTrueHandler(w, r) + } else { + ifFalseHandler(w, r) + } + + } +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/json_indent.go b/vendor/github.com/ant0ine/go-json-rest/rest/json_indent.go new file mode 100644 index 0000000..ad9a5ca --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/json_indent.go @@ -0,0 +1,113 @@ +package rest + +import ( + "bufio" + "encoding/json" + "net" + "net/http" +) + +// JsonIndentMiddleware provides JSON encoding with indentation. +// It could be convenient to use it during development. +// It works by "subclassing" the responseWriter provided by the wrapping middleware, +// replacing the writer.EncodeJson and writer.WriteJson implementations, +// and making the parent implementations ignored. 
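+//
+// e.g. (an illustrative sketch, not part of the upstream file):
+//
+//	api.Use(&rest.JsonIndentMiddleware{Indent: "  "})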
+type JsonIndentMiddleware struct { + + // prefix string, as in json.MarshalIndent + Prefix string + + // indentation string, as in json.MarshalIndent + Indent string +} + +// MiddlewareFunc makes JsonIndentMiddleware implement the Middleware interface. +func (mw *JsonIndentMiddleware) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + + if mw.Indent == "" { + mw.Indent = " " + } + + return func(w ResponseWriter, r *Request) { + + writer := &jsonIndentResponseWriter{w, false, mw.Prefix, mw.Indent} + // call the wrapped handler + handler(writer, r) + } +} + +// Private responseWriter intantiated by the middleware. +// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type jsonIndentResponseWriter struct { + ResponseWriter + wroteHeader bool + prefix string + indent string +} + +// Replace the parent EncodeJson to provide indentation. +func (w *jsonIndentResponseWriter) EncodeJson(v interface{}) ([]byte, error) { + b, err := json.MarshalIndent(v, w.prefix, w.indent) + if err != nil { + return nil, err + } + return b, nil +} + +// Make sure the local EncodeJson and local Write are called. +// Does not call the parent WriteJson. +func (w *jsonIndentResponseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + _, err = w.Write(b) + if err != nil { + return err + } + return nil +} + +// Call the parent WriteHeader. +func (w *jsonIndentResponseWriter) WriteHeader(code int) { + w.ResponseWriter.WriteHeader(code) + w.wroteHeader = true +} + +// Make sure the local WriteHeader is called, and call the parent Flush. +// Provided in order to implement the http.Flusher interface. +func (w *jsonIndentResponseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Call the parent CloseNotify. +// Provided in order to implement the http.CloseNotifier interface. +func (w *jsonIndentResponseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. +func (w *jsonIndentResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} + +// Make sure the local WriteHeader is called, and call the parent Write. +// Provided in order to implement the http.ResponseWriter interface. +func (w *jsonIndentResponseWriter) Write(b []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + writer := w.ResponseWriter.(http.ResponseWriter) + return writer.Write(b) +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/jsonp.go b/vendor/github.com/ant0ine/go-json-rest/rest/jsonp.go new file mode 100644 index 0000000..6071b50 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/jsonp.go @@ -0,0 +1,116 @@ +package rest + +import ( + "bufio" + "net" + "net/http" +) + +// JsonpMiddleware provides JSONP responses on demand, based on the presence +// of a query string argument specifying the callback name. +type JsonpMiddleware struct { + + // Name of the query string parameter used to specify the + // the name of the JS callback used for the padding. + // Defaults to "callback". + CallbackNameKey string +} + +// MiddlewareFunc returns a HandlerFunc that implements the middleware. 
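+//
+// A hedged usage sketch (not part of the upstream file): with the default
+// CallbackNameKey, a request such as GET /users/123?callback=myFunc receives the
+// JSON payload wrapped as /**/myFunc({...}).
+//
+//	api.Use(&rest.JsonpMiddleware{})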
+func (mw *JsonpMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + if mw.CallbackNameKey == "" { + mw.CallbackNameKey = "callback" + } + + return func(w ResponseWriter, r *Request) { + + callbackName := r.URL.Query().Get(mw.CallbackNameKey) + // TODO validate the callbackName ? + + if callbackName != "" { + // the client request JSONP, instantiate JsonpMiddleware. + writer := &jsonpResponseWriter{w, false, callbackName} + // call the handler with the wrapped writer + h(writer, r) + } else { + // do nothing special + h(w, r) + } + + } +} + +// Private responseWriter intantiated by the JSONP middleware. +// It adds the padding to the payload and set the proper headers. +// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type jsonpResponseWriter struct { + ResponseWriter + wroteHeader bool + callbackName string +} + +// Overwrite the Content-Type to be text/javascript +func (w *jsonpResponseWriter) WriteHeader(code int) { + + w.Header().Set("Content-Type", "text/javascript") + + w.ResponseWriter.WriteHeader(code) + w.wroteHeader = true +} + +// Make sure the local Write is called. +func (w *jsonpResponseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + // JSONP security fix (http://miki.it/blog/2014/7/8/abusing-jsonp-with-rosetta-flash/) + w.Header().Set("Content-Disposition", "filename=f.txt") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.Write([]byte("/**/" + w.callbackName + "(")) + w.Write(b) + w.Write([]byte(")")) + return nil +} + +// Make sure the local WriteHeader is called, and call the parent Flush. +// Provided in order to implement the http.Flusher interface. +func (w *jsonpResponseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Call the parent CloseNotify. +// Provided in order to implement the http.CloseNotifier interface. +func (w *jsonpResponseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. +func (w *jsonpResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} + +// Make sure the local WriteHeader is called. +// Provided in order to implement the http.ResponseWriter interface. +func (w *jsonpResponseWriter) Write(b []byte) (int, error) { + + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + writer := w.ResponseWriter.(http.ResponseWriter) + + return writer.Write(b) +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/middleware.go b/vendor/github.com/ant0ine/go-json-rest/rest/middleware.go new file mode 100644 index 0000000..ba03fb8 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/middleware.go @@ -0,0 +1,72 @@ +package rest + +import ( + "net/http" +) + +// HandlerFunc defines the handler function. It is the go-json-rest equivalent of http.HandlerFunc. +type HandlerFunc func(ResponseWriter, *Request) + +// App defines the interface that an object should implement to be used as an app in this framework +// stack. The App is the top element of the stack, the other elements being middlewares. +type App interface { + AppFunc() HandlerFunc +} + +// AppSimple is an adapter type that makes it easy to write an App with a simple function. 
+// eg: rest.NewApi(rest.AppSimple(func(w rest.ResponseWriter, r *rest.Request) { ... })) +type AppSimple HandlerFunc + +// AppFunc makes AppSimple implement the App interface. +func (as AppSimple) AppFunc() HandlerFunc { + return HandlerFunc(as) +} + +// Middleware defines the interface that objects must implement in order to wrap a HandlerFunc and +// be used in the middleware stack. +type Middleware interface { + MiddlewareFunc(handler HandlerFunc) HandlerFunc +} + +// MiddlewareSimple is an adapter type that makes it easy to write a Middleware with a simple +// function. eg: api.Use(rest.MiddlewareSimple(func(h HandlerFunc) Handlerfunc { ... })) +type MiddlewareSimple func(handler HandlerFunc) HandlerFunc + +// MiddlewareFunc makes MiddlewareSimple implement the Middleware interface. +func (ms MiddlewareSimple) MiddlewareFunc(handler HandlerFunc) HandlerFunc { + return ms(handler) +} + +// WrapMiddlewares calls the MiddlewareFunc methods in the reverse order and returns an HandlerFunc +// ready to be executed. This can be used to wrap a set of middlewares, post routing, on a per Route +// basis. +func WrapMiddlewares(middlewares []Middleware, handler HandlerFunc) HandlerFunc { + wrapped := handler + for i := len(middlewares) - 1; i >= 0; i-- { + wrapped = middlewares[i].MiddlewareFunc(wrapped) + } + return wrapped +} + +// Handle the transition between net/http and go-json-rest objects. +// It intanciates the rest.Request and rest.ResponseWriter, ... +func adapterFunc(handler HandlerFunc) http.HandlerFunc { + + return func(origWriter http.ResponseWriter, origRequest *http.Request) { + + // instantiate the rest objects + request := &Request{ + origRequest, + nil, + map[string]interface{}{}, + } + + writer := &responseWriter{ + origWriter, + false, + } + + // call the wrapped handler + handler(writer, request) + } +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/powered_by.go b/vendor/github.com/ant0ine/go-json-rest/rest/powered_by.go new file mode 100644 index 0000000..3b22ccf --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/powered_by.go @@ -0,0 +1,29 @@ +package rest + +const xPoweredByDefault = "go-json-rest" + +// PoweredByMiddleware adds the "X-Powered-By" header to the HTTP response. +type PoweredByMiddleware struct { + + // If specified, used as the value for the "X-Powered-By" response header. + // Defaults to "go-json-rest". + XPoweredBy string +} + +// MiddlewareFunc makes PoweredByMiddleware implement the Middleware interface. +func (mw *PoweredByMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + poweredBy := xPoweredByDefault + if mw.XPoweredBy != "" { + poweredBy = mw.XPoweredBy + } + + return func(w ResponseWriter, r *Request) { + + w.Header().Add("X-Powered-By", poweredBy) + + // call the handler + h(w, r) + + } +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/recorder.go b/vendor/github.com/ant0ine/go-json-rest/rest/recorder.go new file mode 100644 index 0000000..20502e9 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/recorder.go @@ -0,0 +1,100 @@ +package rest + +import ( + "bufio" + "net" + "net/http" +) + +// RecorderMiddleware keeps a record of the HTTP status code of the response, +// and the number of bytes written. +// The result is available to the wrapping handlers as request.Env["STATUS_CODE"].(int), +// and as request.Env["BYTES_WRITTEN"].(int64) +type RecorderMiddleware struct{} + +// MiddlewareFunc makes RecorderMiddleware implement the Middleware interface. 
+func (mw *RecorderMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + + writer := &recorderResponseWriter{w, 0, false, 0} + + // call the handler + h(writer, r) + + r.Env["STATUS_CODE"] = writer.statusCode + r.Env["BYTES_WRITTEN"] = writer.bytesWritten + } +} + +// Private responseWriter intantiated by the recorder middleware. +// It keeps a record of the HTTP status code of the response. +// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type recorderResponseWriter struct { + ResponseWriter + statusCode int + wroteHeader bool + bytesWritten int64 +} + +// Record the status code. +func (w *recorderResponseWriter) WriteHeader(code int) { + w.ResponseWriter.WriteHeader(code) + if w.wroteHeader { + return + } + w.statusCode = code + w.wroteHeader = true +} + +// Make sure the local Write is called. +func (w *recorderResponseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + _, err = w.Write(b) + if err != nil { + return err + } + return nil +} + +// Make sure the local WriteHeader is called, and call the parent Flush. +// Provided in order to implement the http.Flusher interface. +func (w *recorderResponseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Call the parent CloseNotify. +// Provided in order to implement the http.CloseNotifier interface. +func (w *recorderResponseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. +func (w *recorderResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} + +// Make sure the local WriteHeader is called, and call the parent Write. +// Provided in order to implement the http.ResponseWriter interface. +func (w *recorderResponseWriter) Write(b []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + writer := w.ResponseWriter.(http.ResponseWriter) + written, err := writer.Write(b) + w.bytesWritten += int64(written) + return written, err +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/recover.go b/vendor/github.com/ant0ine/go-json-rest/rest/recover.go new file mode 100644 index 0000000..99f1515 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/recover.go @@ -0,0 +1,74 @@ +package rest + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "runtime/debug" +) + +// RecoverMiddleware catches the panic errors that occur in the wrapped HandleFunc, +// and convert them to 500 responses. +type RecoverMiddleware struct { + + // Custom logger used for logging the panic errors, + // optional, defaults to log.New(os.Stderr, "", 0) + Logger *log.Logger + + // If true, the log records will be printed as JSON. Convenient for log parsing. + EnableLogAsJson bool + + // If true, when a "panic" happens, the error string and the stack trace will be + // printed in the 500 response body. + EnableResponseStackTrace bool +} + +// MiddlewareFunc makes RecoverMiddleware implement the Middleware interface. 
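+//
+// e.g. (an illustrative sketch, mirroring its use in DefaultDevStack):
+//
+//	api.Use(&rest.RecoverMiddleware{EnableResponseStackTrace: true})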
+func (mw *RecoverMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + // set the default Logger + if mw.Logger == nil { + mw.Logger = log.New(os.Stderr, "", 0) + } + + return func(w ResponseWriter, r *Request) { + + // catch user code's panic, and convert to http response + defer func() { + if reco := recover(); reco != nil { + trace := debug.Stack() + + // log the trace + message := fmt.Sprintf("%s\n%s", reco, trace) + mw.logError(message) + + // write error response + if mw.EnableResponseStackTrace { + Error(w, message, http.StatusInternalServerError) + } else { + Error(w, "Internal Server Error", http.StatusInternalServerError) + } + } + }() + + // call the handler + h(w, r) + } +} + +func (mw *RecoverMiddleware) logError(message string) { + if mw.EnableLogAsJson { + record := map[string]string{ + "error": message, + } + b, err := json.Marshal(&record) + if err != nil { + panic(err) + } + mw.Logger.Printf("%s", b) + } else { + mw.Logger.Print(message) + } +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/request.go b/vendor/github.com/ant0ine/go-json-rest/rest/request.go new file mode 100644 index 0000000..c4eb381 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/request.go @@ -0,0 +1,148 @@ +package rest + +import ( + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +var ( + // ErrJsonPayloadEmpty is returned when the JSON payload is empty. + ErrJsonPayloadEmpty = errors.New("JSON payload is empty") +) + +// Request inherits from http.Request, and provides additional methods. +type Request struct { + *http.Request + + // Map of parameters that have been matched in the URL Path. + PathParams map[string]string + + // Environment used by middlewares to communicate. + Env map[string]interface{} +} + +// PathParam provides a convenient access to the PathParams map. +func (r *Request) PathParam(name string) string { + return r.PathParams[name] +} + +// DecodeJsonPayload reads the request body and decodes the JSON using json.Unmarshal. +func (r *Request) DecodeJsonPayload(v interface{}) error { + content, err := ioutil.ReadAll(r.Body) + r.Body.Close() + if err != nil { + return err + } + if len(content) == 0 { + return ErrJsonPayloadEmpty + } + err = json.Unmarshal(content, v) + if err != nil { + return err + } + return nil +} + +// BaseUrl returns a new URL object with the Host and Scheme taken from the request. +// (without the trailing slash in the host) +func (r *Request) BaseUrl() *url.URL { + scheme := r.URL.Scheme + if scheme == "" { + scheme = "http" + } + + // HTTP sometimes gives the default scheme as HTTP even when used with TLS + // Check if TLS is not nil and given back https scheme + if scheme == "http" && r.TLS != nil { + scheme = "https" + } + + host := r.Host + if len(host) > 0 && host[len(host)-1] == '/' { + host = host[:len(host)-1] + } + + return &url.URL{ + Scheme: scheme, + Host: host, + } +} + +// UrlFor returns the URL object from UriBase with the Path set to path, and the query +// string built with queryParams. +func (r *Request) UrlFor(path string, queryParams map[string][]string) *url.URL { + baseUrl := r.BaseUrl() + baseUrl.Path = path + if queryParams != nil { + query := url.Values{} + for k, v := range queryParams { + for _, vv := range v { + query.Add(k, vv) + } + } + baseUrl.RawQuery = query.Encode() + } + return baseUrl +} + +// CorsInfo contains the CORS request info derived from a rest.Request. 
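+// For illustration, a hand-rolled CORS middleware built on GetCorsInfo (a
+// sketch only; the allowed origin and the api value are assumptions):
+//
+//	api.Use(rest.MiddlewareSimple(func(h rest.HandlerFunc) rest.HandlerFunc {
+//		return func(w rest.ResponseWriter, r *rest.Request) {
+//			info := r.GetCorsInfo()
+//			if !info.IsCors {
+//				h(w, r)
+//				return
+//			}
+//			w.Header().Set("Access-Control-Allow-Origin", "https://app.example.com")
+//			if info.IsPreflight {
+//				w.Header().Set("Access-Control-Allow-Methods", info.AccessControlRequestMethod)
+//				w.WriteHeader(http.StatusOK)
+//				return
+//			}
+//			h(w, r)
+//		}
+//	}))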
+type CorsInfo struct { + IsCors bool + IsPreflight bool + Origin string + OriginUrl *url.URL + + // The header value is converted to uppercase to avoid common mistakes. + AccessControlRequestMethod string + + // The header values are normalized with http.CanonicalHeaderKey. + AccessControlRequestHeaders []string +} + +// GetCorsInfo derives CorsInfo from Request. +func (r *Request) GetCorsInfo() *CorsInfo { + + origin := r.Header.Get("Origin") + + var originUrl *url.URL + var isCors bool + + if origin == "" { + isCors = false + } else if origin == "null" { + isCors = true + } else { + var err error + originUrl, err = url.ParseRequestURI(origin) + isCors = err == nil && r.Host != originUrl.Host + } + + reqMethod := r.Header.Get("Access-Control-Request-Method") + + reqHeaders := []string{} + rawReqHeaders := r.Header[http.CanonicalHeaderKey("Access-Control-Request-Headers")] + for _, rawReqHeader := range rawReqHeaders { + if len(rawReqHeader) == 0 { + continue + } + // net/http does not handle comma delimited headers for us + for _, reqHeader := range strings.Split(rawReqHeader, ",") { + reqHeaders = append(reqHeaders, http.CanonicalHeaderKey(strings.TrimSpace(reqHeader))) + } + } + + isPreflight := isCors && r.Method == "OPTIONS" && reqMethod != "" + + return &CorsInfo{ + IsCors: isCors, + IsPreflight: isPreflight, + Origin: origin, + OriginUrl: originUrl, + AccessControlRequestMethod: strings.ToUpper(reqMethod), + AccessControlRequestHeaders: reqHeaders, + } +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/response.go b/vendor/github.com/ant0ine/go-json-rest/rest/response.go new file mode 100644 index 0000000..52529f1 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/response.go @@ -0,0 +1,127 @@ +package rest + +import ( + "bufio" + "encoding/json" + "net" + "net/http" +) + +// A ResponseWriter interface dedicated to JSON HTTP response. +// Note, the responseWriter object instantiated by the framework also implements many other interfaces +// accessible by type assertion: http.ResponseWriter, http.Flusher, http.CloseNotifier, http.Hijacker. +type ResponseWriter interface { + + // Identical to the http.ResponseWriter interface + Header() http.Header + + // Use EncodeJson to generate the payload, write the headers with http.StatusOK if + // they are not already written, then write the payload. + // The Content-Type header is set to "application/json", unless already specified. + WriteJson(v interface{}) error + + // Encode the data structure to JSON, mainly used to wrap ResponseWriter in + // middlewares. + EncodeJson(v interface{}) ([]byte, error) + + // Similar to the http.ResponseWriter interface, with additional JSON related + // headers set. + WriteHeader(int) +} + +// This allows to customize the field name used in the error response payload. +// It defaults to "Error" for compatibility reason, but can be changed before starting the server. 
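+// For illustration, with the default field name a handler calling
+// Error(w, "account not found", 404) produces the response body:
+//
+//	{"Error":"account not found"}
+//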
+// eg: rest.ErrorFieldName = "errorMessage" +var ErrorFieldName = "Error" + +// Error produces an error response in JSON with the following structure, '{"Error":"My error message"}' +// The standard plain text net/http Error helper can still be called like this: +// http.Error(w, "error message", code) +func Error(w ResponseWriter, error string, code int) { + w.WriteHeader(code) + err := w.WriteJson(map[string]string{ErrorFieldName: error}) + if err != nil { + panic(err) + } +} + +// NotFound produces a 404 response with the following JSON, '{"Error":"Resource not found"}' +// The standard plain text net/http NotFound helper can still be called like this: +// http.NotFound(w, r.Request) +func NotFound(w ResponseWriter, r *Request) { + Error(w, "Resource not found", http.StatusNotFound) +} + +// Private responseWriter intantiated by the resource handler. +// It implements the following interfaces: +// ResponseWriter +// http.ResponseWriter +// http.Flusher +// http.CloseNotifier +// http.Hijacker +type responseWriter struct { + http.ResponseWriter + wroteHeader bool +} + +func (w *responseWriter) WriteHeader(code int) { + if w.Header().Get("Content-Type") == "" { + // Per spec, UTF-8 is the default, and the charset parameter should not + // be necessary. But some clients (eg: Chrome) think otherwise. + // Since json.Marshal produces UTF-8, setting the charset parameter is a + // safe option. + w.Header().Set("Content-Type", "application/json; charset=utf-8") + } + w.ResponseWriter.WriteHeader(code) + w.wroteHeader = true +} + +func (w *responseWriter) EncodeJson(v interface{}) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + return b, nil +} + +// Encode the object in JSON and call Write. +func (w *responseWriter) WriteJson(v interface{}) error { + b, err := w.EncodeJson(v) + if err != nil { + return err + } + _, err = w.Write(b) + if err != nil { + return err + } + return nil +} + +// Provided in order to implement the http.ResponseWriter interface. +func (w *responseWriter) Write(b []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + return w.ResponseWriter.Write(b) +} + +// Provided in order to implement the http.Flusher interface. +func (w *responseWriter) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + flusher := w.ResponseWriter.(http.Flusher) + flusher.Flush() +} + +// Provided in order to implement the http.CloseNotifier interface. +func (w *responseWriter) CloseNotify() <-chan bool { + notifier := w.ResponseWriter.(http.CloseNotifier) + return notifier.CloseNotify() +} + +// Provided in order to implement the http.Hijacker interface. +func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker := w.ResponseWriter.(http.Hijacker) + return hijacker.Hijack() +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/route.go b/vendor/github.com/ant0ine/go-json-rest/rest/route.go new file mode 100644 index 0000000..efb94a7 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/route.go @@ -0,0 +1,107 @@ +package rest + +import ( + "strings" +) + +// Route defines a route as consumed by the router. It can be instantiated directly, or using one +// of the shortcut methods: rest.Get, rest.Post, rest.Put, rest.Patch and rest.Delete. +type Route struct { + + // Any HTTP method. It will be used as uppercase to avoid common mistakes. + HttpMethod string + + // A string like "/resource/:id.json". 
+ // Placeholders supported are: + // :paramName that matches any char to the first '/' or '.' + // #paramName that matches any char to the first '/' + // *paramName that matches everything to the end of the string + // (placeholder names must be unique per PathExp) + PathExp string + + // Code that will be executed when this route is taken. + Func HandlerFunc +} + +// MakePath generates the path corresponding to this Route and the provided path parameters. +// This is used for reverse route resolution. +func (route *Route) MakePath(pathParams map[string]string) string { + path := route.PathExp + for paramName, paramValue := range pathParams { + paramPlaceholder := ":" + paramName + relaxedPlaceholder := "#" + paramName + splatPlaceholder := "*" + paramName + r := strings.NewReplacer(paramPlaceholder, paramValue, splatPlaceholder, paramValue, relaxedPlaceholder, paramValue) + path = r.Replace(path) + } + return path +} + +// Head is a shortcut method that instantiates a HEAD route. See the Route object the parameters definitions. +// Equivalent to &Route{"HEAD", pathExp, handlerFunc} +func Head(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "HEAD", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Get is a shortcut method that instantiates a GET route. See the Route object the parameters definitions. +// Equivalent to &Route{"GET", pathExp, handlerFunc} +func Get(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "GET", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Post is a shortcut method that instantiates a POST route. See the Route object the parameters definitions. +// Equivalent to &Route{"POST", pathExp, handlerFunc} +func Post(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "POST", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Put is a shortcut method that instantiates a PUT route. See the Route object the parameters definitions. +// Equivalent to &Route{"PUT", pathExp, handlerFunc} +func Put(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "PUT", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Patch is a shortcut method that instantiates a PATCH route. See the Route object the parameters definitions. +// Equivalent to &Route{"PATCH", pathExp, handlerFunc} +func Patch(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "PATCH", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Delete is a shortcut method that instantiates a DELETE route. Equivalent to &Route{"DELETE", pathExp, handlerFunc} +func Delete(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "DELETE", + PathExp: pathExp, + Func: handlerFunc, + } +} + +// Options is a shortcut method that instantiates an OPTIONS route. See the Route object the parameters definitions. 
+// Equivalent to &Route{"OPTIONS", pathExp, handlerFunc} +func Options(pathExp string, handlerFunc HandlerFunc) *Route { + return &Route{ + HttpMethod: "OPTIONS", + PathExp: pathExp, + Func: handlerFunc, + } +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/router.go b/vendor/github.com/ant0ine/go-json-rest/rest/router.go new file mode 100644 index 0000000..f7ab713 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/router.go @@ -0,0 +1,194 @@ +package rest + +import ( + "errors" + "github.com/ant0ine/go-json-rest/rest/trie" + "net/http" + "net/url" + "strings" +) + +type router struct { + Routes []*Route + + disableTrieCompression bool + index map[*Route]int + trie *trie.Trie +} + +// MakeRouter returns the router app. Given a set of Routes, it dispatches the request to the +// HandlerFunc of the first route that matches. The order of the Routes matters. +func MakeRouter(routes ...*Route) (App, error) { + r := &router{ + Routes: routes, + } + err := r.start() + if err != nil { + return nil, err + } + return r, nil +} + +// Handle the REST routing and run the user code. +func (rt *router) AppFunc() HandlerFunc { + return func(writer ResponseWriter, request *Request) { + + // find the route + route, params, pathMatched := rt.findRouteFromURL(request.Method, request.URL) + if route == nil { + + if pathMatched { + // no route found, but path was matched: 405 Method Not Allowed + Error(writer, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + // no route found, the path was not matched: 404 Not Found + NotFound(writer, request) + return + } + + // a route was found, set the PathParams + request.PathParams = params + + // run the user code + handler := route.Func + handler(writer, request) + } +} + +// This is run for each new request, perf is important. +func escapedPath(urlObj *url.URL) string { + // the escape method of url.URL should be public + // that would avoid this split. + parts := strings.SplitN(urlObj.RequestURI(), "?", 2) + return parts[0] +} + +var preEscape = strings.NewReplacer("*", "__SPLAT_PLACEHOLDER__", "#", "__RELAXED_PLACEHOLDER__") + +var postEscape = strings.NewReplacer("__SPLAT_PLACEHOLDER__", "*", "__RELAXED_PLACEHOLDER__", "#") + +// This is run at init time only. +func escapedPathExp(pathExp string) (string, error) { + + // PathExp validation + if pathExp == "" { + return "", errors.New("empty PathExp") + } + if pathExp[0] != '/' { + return "", errors.New("PathExp must start with /") + } + if strings.Contains(pathExp, "?") { + return "", errors.New("PathExp must not contain the query string") + } + + // Get the right escaping + // XXX a bit hacky + + pathExp = preEscape.Replace(pathExp) + + urlObj, err := url.Parse(pathExp) + if err != nil { + return "", err + } + + // get the same escaping as find requests + pathExp = urlObj.RequestURI() + + pathExp = postEscape.Replace(pathExp) + + return pathExp, nil +} + +// This validates the Routes and prepares the Trie data structure. +// It must be called once the Routes are defined and before trying to find Routes. +// The order matters, if multiple Routes match, the first defined will be used. +func (rt *router) start() error { + + rt.trie = trie.New() + rt.index = map[*Route]int{} + + for i, route := range rt.Routes { + + // work with the PathExp urlencoded. 
+ pathExp, err := escapedPathExp(route.PathExp) + if err != nil { + return err + } + + // insert in the Trie + err = rt.trie.AddRoute( + strings.ToUpper(route.HttpMethod), // work with the HttpMethod in uppercase + pathExp, + route, + ) + if err != nil { + return err + } + + // index + rt.index[route] = i + } + + if rt.disableTrieCompression == false { + rt.trie.Compress() + } + + return nil +} + +// return the result that has the route defined the earliest +func (rt *router) ofFirstDefinedRoute(matches []*trie.Match) *trie.Match { + minIndex := -1 + var bestMatch *trie.Match + + for _, result := range matches { + route := result.Route.(*Route) + routeIndex := rt.index[route] + if minIndex == -1 || routeIndex < minIndex { + minIndex = routeIndex + bestMatch = result + } + } + + return bestMatch +} + +// Return the first matching Route and the corresponding parameters for a given URL object. +func (rt *router) findRouteFromURL(httpMethod string, urlObj *url.URL) (*Route, map[string]string, bool) { + + // lookup the routes in the Trie + matches, pathMatched := rt.trie.FindRoutesAndPathMatched( + strings.ToUpper(httpMethod), // work with the httpMethod in uppercase + escapedPath(urlObj), // work with the path urlencoded + ) + + // short cuts + if len(matches) == 0 { + // no route found + return nil, nil, pathMatched + } + + if len(matches) == 1 { + // one route found + return matches[0].Route.(*Route), matches[0].Params, pathMatched + } + + // multiple routes found, pick the first defined + result := rt.ofFirstDefinedRoute(matches) + return result.Route.(*Route), result.Params, pathMatched +} + +// Parse the url string (complete or just the path) and return the first matching Route and the corresponding parameters. +func (rt *router) findRoute(httpMethod, urlStr string) (*Route, map[string]string, bool, error) { + + // parse the url + urlObj, err := url.Parse(urlStr) + if err != nil { + return nil, nil, false, err + } + + route, params, pathMatched := rt.findRouteFromURL(httpMethod, urlObj) + return route, params, pathMatched, nil +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/status.go b/vendor/github.com/ant0ine/go-json-rest/rest/status.go new file mode 100644 index 0000000..6b6b5d1 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/status.go @@ -0,0 +1,129 @@ +package rest + +import ( + "fmt" + "log" + "os" + "sync" + "time" +) + +// StatusMiddleware keeps track of various stats about the processed requests. +// It depends on request.Env["STATUS_CODE"] and request.Env["ELAPSED_TIME"], +// recorderMiddleware and timerMiddleware must be in the wrapped middlewares. +type StatusMiddleware struct { + lock sync.RWMutex + start time.Time + pid int + responseCounts map[string]int + totalResponseTime time.Time +} + +// MiddlewareFunc makes StatusMiddleware implement the Middleware interface. 
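+// For illustration, a sketch of the wiring this middleware expects (the
+// "/.status" route and surrounding Api setup are assumptions based on the
+// Status documentation below):
+//
+//	statusMw := &rest.StatusMiddleware{}
+//	api := rest.NewApi()
+//	api.Use(statusMw)
+//	api.Use(&rest.TimerMiddleware{})
+//	api.Use(&rest.RecorderMiddleware{})
+//	router, err := rest.MakeRouter(
+//		rest.Get("/.status", func(w rest.ResponseWriter, r *rest.Request) {
+//			w.WriteJson(statusMw.GetStatus())
+//		}),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	api.SetApp(router)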
+func (mw *StatusMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + + mw.start = time.Now() + mw.pid = os.Getpid() + mw.responseCounts = map[string]int{} + mw.totalResponseTime = time.Time{} + + return func(w ResponseWriter, r *Request) { + + // call the handler + h(w, r) + + if r.Env["STATUS_CODE"] == nil { + log.Fatal("StatusMiddleware: Env[\"STATUS_CODE\"] is nil, " + + "RecorderMiddleware may not be in the wrapped Middlewares.") + } + statusCode := r.Env["STATUS_CODE"].(int) + + if r.Env["ELAPSED_TIME"] == nil { + log.Fatal("StatusMiddleware: Env[\"ELAPSED_TIME\"] is nil, " + + "TimerMiddleware may not be in the wrapped Middlewares.") + } + responseTime := r.Env["ELAPSED_TIME"].(*time.Duration) + + mw.lock.Lock() + mw.responseCounts[fmt.Sprintf("%d", statusCode)]++ + mw.totalResponseTime = mw.totalResponseTime.Add(*responseTime) + mw.lock.Unlock() + } +} + +// Status contains stats and status information. It is returned by GetStatus. +// These information can be made available as an API endpoint, see the "status" +// example to install the following status route. +// GET /.status returns something like: +// +// { +// "Pid": 21732, +// "UpTime": "1m15.926272s", +// "UpTimeSec": 75.926272, +// "Time": "2013-03-04 08:00:27.152986 +0000 UTC", +// "TimeUnix": 1362384027, +// "StatusCodeCount": { +// "200": 53, +// "404": 11 +// }, +// "TotalCount": 64, +// "TotalResponseTime": "16.777ms", +// "TotalResponseTimeSec": 0.016777, +// "AverageResponseTime": "262.14us", +// "AverageResponseTimeSec": 0.00026214 +// } +type Status struct { + Pid int + UpTime string + UpTimeSec float64 + Time string + TimeUnix int64 + StatusCodeCount map[string]int + TotalCount int + TotalResponseTime string + TotalResponseTimeSec float64 + AverageResponseTime string + AverageResponseTimeSec float64 +} + +// GetStatus computes and returns a Status object based on the request informations accumulated +// since the start of the process. +func (mw *StatusMiddleware) GetStatus() *Status { + + mw.lock.RLock() + + now := time.Now() + + uptime := now.Sub(mw.start) + + totalCount := 0 + for _, count := range mw.responseCounts { + totalCount += count + } + + totalResponseTime := mw.totalResponseTime.Sub(time.Time{}) + + averageResponseTime := time.Duration(0) + if totalCount > 0 { + avgNs := int64(totalResponseTime) / int64(totalCount) + averageResponseTime = time.Duration(avgNs) + } + + status := &Status{ + Pid: mw.pid, + UpTime: uptime.String(), + UpTimeSec: uptime.Seconds(), + Time: now.String(), + TimeUnix: now.Unix(), + StatusCodeCount: mw.responseCounts, + TotalCount: totalCount, + TotalResponseTime: totalResponseTime.String(), + TotalResponseTimeSec: totalResponseTime.Seconds(), + AverageResponseTime: averageResponseTime.String(), + AverageResponseTimeSec: averageResponseTime.Seconds(), + } + + mw.lock.RUnlock() + + return status +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/timer.go b/vendor/github.com/ant0ine/go-json-rest/rest/timer.go new file mode 100644 index 0000000..b2616c8 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/timer.go @@ -0,0 +1,26 @@ +package rest + +import ( + "time" +) + +// TimerMiddleware computes the elapsed time spent during the execution of the wrapped handler. +// The result is available to the wrapping handlers as request.Env["ELAPSED_TIME"].(*time.Duration), +// and as request.Env["START_TIME"].(*time.Time) +type TimerMiddleware struct{} + +// MiddlewareFunc makes TimerMiddleware implement the Middleware interface. 
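+// For illustration, an outer middleware that flags slow requests using the
+// duration recorded here (the threshold and the api value are assumptions):
+//
+//	api.Use(rest.MiddlewareSimple(func(h rest.HandlerFunc) rest.HandlerFunc {
+//		return func(w rest.ResponseWriter, r *rest.Request) {
+//			h(w, r)
+//			if d := *r.Env["ELAPSED_TIME"].(*time.Duration); d > 500*time.Millisecond {
+//				log.Printf("slow request: %s %s took %s", r.Method, r.URL.Path, d)
+//			}
+//		}
+//	}))
+//	api.Use(&rest.TimerMiddleware{})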
+func (mw *TimerMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc { + return func(w ResponseWriter, r *Request) { + + start := time.Now() + r.Env["START_TIME"] = &start + + // call the handler + h(w, r) + + end := time.Now() + elapsed := end.Sub(start) + r.Env["ELAPSED_TIME"] = &elapsed + } +} diff --git a/vendor/github.com/ant0ine/go-json-rest/rest/trie/impl.go b/vendor/github.com/ant0ine/go-json-rest/rest/trie/impl.go new file mode 100644 index 0000000..2a9fff1 --- /dev/null +++ b/vendor/github.com/ant0ine/go-json-rest/rest/trie/impl.go @@ -0,0 +1,426 @@ +// Special Trie implementation for HTTP routing. +// +// This Trie implementation is designed to support strings that includes +// :param and *splat parameters. Strings that are commonly used to represent +// the Path in HTTP routing. This implementation also maintain for each Path +// a map of HTTP Methods associated with the Route. +// +// You probably don't need to use this package directly. +// +package trie + +import ( + "errors" + "fmt" +) + +func splitParam(remaining string) (string, string) { + i := 0 + for len(remaining) > i && remaining[i] != '/' && remaining[i] != '.' { + i++ + } + return remaining[:i], remaining[i:] +} + +func splitRelaxed(remaining string) (string, string) { + i := 0 + for len(remaining) > i && remaining[i] != '/' { + i++ + } + return remaining[:i], remaining[i:] +} + +type node struct { + HttpMethodToRoute map[string]interface{} + + Children map[string]*node + ChildrenKeyLen int + + ParamChild *node + ParamName string + + RelaxedChild *node + RelaxedName string + + SplatChild *node + SplatName string +} + +func (n *node) addRoute(httpMethod, pathExp string, route interface{}, usedParams []string) error { + + if len(pathExp) == 0 { + // end of the path, leaf node, update the map + if n.HttpMethodToRoute == nil { + n.HttpMethodToRoute = map[string]interface{}{ + httpMethod: route, + } + return nil + } else { + if n.HttpMethodToRoute[httpMethod] != nil { + return errors.New("node.Route already set, duplicated path and method") + } + n.HttpMethodToRoute[httpMethod] = route + return nil + } + } + + token := pathExp[0:1] + remaining := pathExp[1:] + var nextNode *node + + if token[0] == ':' { + // :param case + var name string + name, remaining = splitParam(remaining) + + // Check param name is unique + for _, e := range usedParams { + if e == name { + return errors.New( + fmt.Sprintf("A route can't have two placeholders with the same name: %s", name), + ) + } + } + usedParams = append(usedParams, name) + + if n.ParamChild == nil { + n.ParamChild = &node{} + n.ParamName = name + } else { + if n.ParamName != name { + return errors.New( + fmt.Sprintf( + "Routes sharing a common placeholder MUST name it consistently: %s != %s", + n.ParamName, + name, + ), + ) + } + } + nextNode = n.ParamChild + } else if token[0] == '#' { + // #param case + var name string + name, remaining = splitRelaxed(remaining) + + // Check param name is unique + for _, e := range usedParams { + if e == name { + return errors.New( + fmt.Sprintf("A route can't have two placeholders with the same name: %s", name), + ) + } + } + usedParams = append(usedParams, name) + + if n.RelaxedChild == nil { + n.RelaxedChild = &node{} + n.RelaxedName = name + } else { + if n.RelaxedName != name { + return errors.New( + fmt.Sprintf( + "Routes sharing a common placeholder MUST name it consistently: %s != %s", + n.RelaxedName, + name, + ), + ) + } + } + nextNode = n.RelaxedChild + } else if token[0] == '*' { + // *splat case + name := remaining + remaining 
= "" + + // Check param name is unique + for _, e := range usedParams { + if e == name { + return errors.New( + fmt.Sprintf("A route can't have two placeholders with the same name: %s", name), + ) + } + } + + if n.SplatChild == nil { + n.SplatChild = &node{} + n.SplatName = name + } + nextNode = n.SplatChild + } else { + // general case + if n.Children == nil { + n.Children = map[string]*node{} + n.ChildrenKeyLen = 1 + } + if n.Children[token] == nil { + n.Children[token] = &node{} + } + nextNode = n.Children[token] + } + + return nextNode.addRoute(httpMethod, remaining, route, usedParams) +} + +func (n *node) compress() { + // *splat branch + if n.SplatChild != nil { + n.SplatChild.compress() + } + // :param branch + if n.ParamChild != nil { + n.ParamChild.compress() + } + // #param branch + if n.RelaxedChild != nil { + n.RelaxedChild.compress() + } + // main branch + if len(n.Children) == 0 { + return + } + // compressable ? + canCompress := true + for _, node := range n.Children { + if node.HttpMethodToRoute != nil || node.SplatChild != nil || node.ParamChild != nil || node.RelaxedChild != nil { + canCompress = false + } + } + // compress + if canCompress { + merged := map[string]*node{} + for key, node := range n.Children { + for gdKey, gdNode := range node.Children { + mergedKey := key + gdKey + merged[mergedKey] = gdNode + } + } + n.Children = merged + n.ChildrenKeyLen++ + n.compress() + // continue + } else { + for _, node := range n.Children { + node.compress() + } + } +} + +func printFPadding(padding int, format string, a ...interface{}) { + for i := 0; i < padding; i++ { + fmt.Print(" ") + } + fmt.Printf(format, a...) +} + +// Private function for now +func (n *node) printDebug(level int) { + level++ + // *splat branch + if n.SplatChild != nil { + printFPadding(level, "*splat\n") + n.SplatChild.printDebug(level) + } + // :param branch + if n.ParamChild != nil { + printFPadding(level, ":param\n") + n.ParamChild.printDebug(level) + } + // #param branch + if n.RelaxedChild != nil { + printFPadding(level, "#relaxed\n") + n.RelaxedChild.printDebug(level) + } + // main branch + for key, node := range n.Children { + printFPadding(level, "\"%s\"\n", key) + node.printDebug(level) + } +} + +// utility for the node.findRoutes recursive method + +type paramMatch struct { + name string + value string +} + +type findContext struct { + paramStack []paramMatch + matchFunc func(httpMethod, path string, node *node) +} + +func newFindContext() *findContext { + return &findContext{ + paramStack: []paramMatch{}, + } +} + +func (fc *findContext) pushParams(name, value string) { + fc.paramStack = append( + fc.paramStack, + paramMatch{name, value}, + ) +} + +func (fc *findContext) popParams() { + fc.paramStack = fc.paramStack[:len(fc.paramStack)-1] +} + +func (fc *findContext) paramsAsMap() map[string]string { + r := map[string]string{} + for _, param := range fc.paramStack { + if r[param.name] != "" { + // this is checked at addRoute time, and should never happen. 
+ panic(fmt.Sprintf( + "placeholder %s already found, placeholder names should be unique per route", + param.name, + )) + } + r[param.name] = param.value + } + return r +} + +type Match struct { + // Same Route as in AddRoute + Route interface{} + // map of params matched for this result + Params map[string]string +} + +func (n *node) find(httpMethod, path string, context *findContext) { + + if n.HttpMethodToRoute != nil && path == "" { + context.matchFunc(httpMethod, path, n) + } + + if len(path) == 0 { + return + } + + // *splat branch + if n.SplatChild != nil { + context.pushParams(n.SplatName, path) + n.SplatChild.find(httpMethod, "", context) + context.popParams() + } + + // :param branch + if n.ParamChild != nil { + value, remaining := splitParam(path) + context.pushParams(n.ParamName, value) + n.ParamChild.find(httpMethod, remaining, context) + context.popParams() + } + + // #param branch + if n.RelaxedChild != nil { + value, remaining := splitRelaxed(path) + context.pushParams(n.RelaxedName, value) + n.RelaxedChild.find(httpMethod, remaining, context) + context.popParams() + } + + // main branch + length := n.ChildrenKeyLen + if len(path) < length { + return + } + token := path[0:length] + remaining := path[length:] + if n.Children[token] != nil { + n.Children[token].find(httpMethod, remaining, context) + } +} + +type Trie struct { + root *node +} + +// Instanciate a Trie with an empty node as the root. +func New() *Trie { + return &Trie{ + root: &node{}, + } +} + +// Insert the route in the Trie following or creating the nodes corresponding to the path. +func (t *Trie) AddRoute(httpMethod, pathExp string, route interface{}) error { + return t.root.addRoute(httpMethod, pathExp, route, []string{}) +} + +// Reduce the size of the tree, must be done after the last AddRoute. +func (t *Trie) Compress() { + t.root.compress() +} + +// Private function for now. +func (t *Trie) printDebug() { + fmt.Print("\n") + t.root.printDebug(0) + fmt.Print("\n") +} + +// Given a path and an http method, return all the matching routes. +func (t *Trie) FindRoutes(httpMethod, path string) []*Match { + context := newFindContext() + matches := []*Match{} + context.matchFunc = func(httpMethod, path string, node *node) { + if node.HttpMethodToRoute[httpMethod] != nil { + // path and method match, found a route ! + matches = append( + matches, + &Match{ + Route: node.HttpMethodToRoute[httpMethod], + Params: context.paramsAsMap(), + }, + ) + } + } + t.root.find(httpMethod, path, context) + return matches +} + +// Same as FindRoutes, but return in addition a boolean indicating if the path was matched. +// Useful to return 405 +func (t *Trie) FindRoutesAndPathMatched(httpMethod, path string) ([]*Match, bool) { + context := newFindContext() + pathMatched := false + matches := []*Match{} + context.matchFunc = func(httpMethod, path string, node *node) { + pathMatched = true + if node.HttpMethodToRoute[httpMethod] != nil { + // path and method match, found a route ! + matches = append( + matches, + &Match{ + Route: node.HttpMethodToRoute[httpMethod], + Params: context.paramsAsMap(), + }, + ) + } + } + t.root.find(httpMethod, path, context) + return matches, pathMatched +} + +// Given a path, and whatever the http method, return all the matching routes. 
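+// For illustration, a standalone sketch of the Trie API (the path and route
+// values are arbitrary; normally only the router package uses this directly):
+//
+//	t := trie.New()
+//	if err := t.AddRoute("GET", "/users/:userId/books/:bookId", "route-1"); err != nil {
+//		panic(err)
+//	}
+//	t.Compress()
+//	matches := t.FindRoutesForPath("/users/42/books/7")
+//	// matches[0].Params == map[string]string{"userId": "42", "bookId": "7"}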
+func (t *Trie) FindRoutesForPath(path string) []*Match { + context := newFindContext() + matches := []*Match{} + context.matchFunc = func(httpMethod, path string, node *node) { + params := context.paramsAsMap() + for _, route := range node.HttpMethodToRoute { + matches = append( + matches, + &Match{ + Route: route, + Params: params, + }, + ) + } + } + t.root.find("", path, context) + return matches +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..c836416 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000..8a4a658 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. 
+	offsetPtr    = uintptr(ptrSize)
+	offsetScalar = uintptr(0)
+	offsetFlag   = uintptr(ptrSize * 2)
+
+	// flagKindWidth and flagKindShift indicate various bits that the
+	// reflect package uses internally to track kind information.
+	//
+	// flagRO indicates whether or not the value field of a reflect.Value is
+	// read-only.
+	//
+	// flagIndir indicates whether the value field of a reflect.Value is
+	// the actual data or a pointer to the data.
+	//
+	// These values are valid before golang commit 90a7c3c86944 which
+	// changed their positions. Code in the init function updates these
+	// flags as necessary.
+	flagKindWidth = uintptr(5)
+	flagKindShift = uintptr(flagKindWidth - 1)
+	flagRO        = uintptr(1 << 0)
+	flagIndir     = uintptr(1 << 1)
+)
+
+func init() {
+	// Older versions of reflect.Value stored small integers directly in the
+	// ptr field (which is named val in the older versions). Versions
+	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+	// scalar for this purpose which unfortunately came before the flag
+	// field, so the offset of the flag field is different for those
+	// versions.
+	//
+	// This code constructs a new reflect.Value from a known small integer
+	// and checks if the size of the reflect.Value struct indicates it has
+	// the scalar field. When it does, the offsets are updated accordingly.
+	vv := reflect.ValueOf(0xf00)
+	if unsafe.Sizeof(vv) == (ptrSize * 4) {
+		offsetScalar = ptrSize * 2
+		offsetFlag = ptrSize * 3
+	}
+
+	// Commit 90a7c3c86944 changed the flag positions such that the low
+	// order bits are the kind. This code extracts the kind from the flags
+	// field and ensures it's the correct type. When it's not, the flag
+	// order has been changed to the newer format, so the flags are updated
+	// accordingly.
+	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+	upfv := *(*uintptr)(upf)
+	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+		flagKindShift = 0
+		flagRO = 1 << 5
+		flagIndir = 1 << 6
+
+		// Commit adf9b30e5594 modified the flags to separate the
+		// flagRO flag into two bits which specifies whether or not the
+		// field is embedded. This causes flagIndir to move over a bit
+		// and means that flagRO is the combination of either of the
+		// original flagRO bit and the new bit.
+		//
+		// This code detects the change by extracting what used to be
+		// the indirect bit to ensure it's set. When it's not, the flag
+		// order has been changed to the newer format, so the flags are
+		// updated accordingly.
+		if upfv&flagIndir == 0 {
+			flagRO = 3 << 5
+			flagIndir = 1 << 7
+		}
+	}
+}
+
+// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. + switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000..1fe3cf3 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000..7c519ff --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? 
+ switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
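+// For illustration, the caller-visible effect of this sorting (a sketch using
+// the exported ConfigState defined in config.go):
+//
+//	cs := spew.ConfigState{Indent: " ", SortKeys: true}
+//	cs.Dump(map[string]int{"b": 2, "a": 1, "c": 3})
+//	// map keys print in the order a, b, c instead of Go's random map order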
+func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. 
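+	// For illustration, a sketch that opts out of this pointer-receiver
+	// lookup (v stands for any value the caller wants dumped):
+	//
+	//	cs := spew.ConfigState{Indent: "\t", DisablePointerMethods: true}
+	//	cs.Dump(v)
+	//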
+ DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. 
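+
+A minimal sketch of one possible usage (myVar is only a placeholder value;
+the fmt import is assumed):
+
+	cs := spew.ConfigState{Indent: "\t", MaxDepth: 2}
+	// Hand the formatter to any standard fmt call; %+v also shows pointer addresses.
+	fmt.Printf("myVar: %+v\n", cs.NewFormatter(myVar))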
+ +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000..aacaac6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. 
+ + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. 
The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000..df1d582 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. 
+func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. 
We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..c49875b --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
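+
+Should direct use be needed, a minimal sketch might look like the following
+(myVar is only a placeholder value; the fmt and os imports are assumed):
+
+	f := spew.NewFormatter(myVar)
+	// f satisfies fmt.Formatter, so any %v variant works with standard fmt calls.
+	fmt.Fprintf(os.Stderr, "myVar: %#+v\n", f)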
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 0000000..32c0e33 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 0000000..cf68a3c --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,80 @@ +# This is the official list of Go-MySQL-Driver authors for copyright purposes. + +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. 
+# Please keep the list sorted. + + +# Individual Persons + +Aaron Hopkins +Achille Roussel +Arne Hormann +Asta Xie +Bulat Gaifullin +Carlos Nieto +Chris Moos +Daniel Montoya +Daniel Nichter +Daniël van Eeden +Dave Protasowski +DisposaBoy +Egor Smolyakov +Evan Shaw +Frederick Mayle +Gustavo Kristic +Hanno Braun +Henri Yandell +Hirotaka Yamamoto +ICHINOSE Shogo +INADA Naoki +Jacek Szwec +James Harr +Jeff Hodges +Jeffrey Charles +Jian Zhen +Joshua Prunier +Julien Lefevre +Julien Schmidt +Justin Li +Justin Nuß +Kamil Dziedzic +Kevin Malachowski +Lennart Rudolph +Leonardo YongUk Kim +Linh Tran Tuan +Lion Yang +Luca Looz +Lucas Liu +Luke Scott +Maciej Zimnoch +Michael Woolnough +Nicola Peduzzi +Olivier Mengué +oscarzhao +Paul Bonser +Peter Schultz +Rebecca Chin +Runrioter Wung +Robert Russell +Shuode Li +Soroush Pour +Stan Putrya +Stanley Gunawan +Xiangyu Hu +Xiaobing Jiang +Xiuming Chen +Zhenye Xie + +# Organizations + +Barracuda Networks, Inc. +Counting Ltd. +Google Inc. +Keybase Inc. +Pivotal Inc. +Stripe Inc. diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 0000000..6bcad7e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,119 @@ +## Version 1.3 (2016-12-01) + +Changes: + + - Go 1.1 is no longer supported + - Use decimals fields in MySQL to format time types (#249) + - Buffer optimizations (#269) + - TLS ServerName defaults to the host (#283) + - Refactoring (#400, #410, #437) + - Adjusted documentation for second generation CloudSQL (#485) + - Documented DSN system var quoting rules (#502) + - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512) + +New Features: + + - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) + - Support for returning table alias on Columns() (#289, #359, #382) + - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490) + - Support for uint64 parameters with high bit set (#332, #345) + - Cleartext authentication plugin support (#327) + - Exported ParseDSN function and the Config struct (#403, #419, #429) + - Read / Write timeouts (#401) + - Support for JSON field type (#414) + - Support for multi-statements and multi-results (#411, #431) + - DSN parameter to set the driver-side max_allowed_packet value manually (#489) + - Native password authentication plugin support (#494, #524) + +Bugfixes: + + - Fixed handling of queries without columns and rows (#255) + - Fixed a panic when SetKeepAlive() failed (#298) + - Handle ERR packets while reading rows (#321) + - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349) + - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356) + - Actually zero out bytes in handshake response (#378) + - Fixed race condition in registering LOAD DATA INFILE handler (#383) + - Fixed tests with MySQL 5.7.9+ (#380) + - QueryUnescape TLS config names (#397) + - Fixed "broken pipe" error by writing to closed socket (#390) + - Fixed LOAD LOCAL DATA INFILE buffering (#424) + - Fixed parsing of floats into float64 when placeholders are used (#434) + - Fixed DSN tests with Go 1.7+ (#459) + - Handle ERR packets while waiting for EOF (#473) + - Invalidate connection on error while discarding additional results (#513) + - Allow terminating packets of length 0 (#516) + + +## Version 1.2 (2014-06-03) + +Changes: + + - We switched back to a "rolling release". 
`go get` installs the current master branch again + - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver + - Exported errors to allow easy checking from application code + - Enabled TCP Keepalives on TCP connections + - Optimized INFILE handling (better buffer size calculation, lazy init, ...) + - The DSN parser also checks for a missing separating slash + - Faster binary date / datetime to string formatting + - Also exported the MySQLWarning type + - mysqlConn.Close returns the first error encountered instead of ignoring all errors + - writePacket() automatically writes the packet size to the header + - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets + +New Features: + + - `RegisterDial` allows the usage of a custom dial function to establish the network connection + - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter + - Logging of critical errors is configurable with `SetLogger` + - Google CloudSQL support + +Bugfixes: + + - Allow more than 32 parameters in prepared statements + - Various old_password fixes + - Fixed TestConcurrent test to pass Go's race detection + - Fixed appendLengthEncodedInteger for large numbers + - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) + + +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors + - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. 
Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Splitted packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md new file mode 100644 index 0000000..8fe16bc --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing Guidelines + +## Reporting Issues + +Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). + +## Contributing Code + +By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. +Don't forget to add yourself to the AUTHORS file. + +### Code Review + +Everyone is invited to review and comment on pull requests. +If it looks fine to you, comment with "LGTM" (Looks good to me). + +If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. + +Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". + +## Development Ideas + +If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 0000000..14e2f77 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. 
"Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md new file mode 100644 index 0000000..299198d --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -0,0 +1,476 @@ +# Go-MySQL-Driver + +A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package + +![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") + +--------------------------------------- + * [Features](#features) + * [Requirements](#requirements) + * [Installation](#installation) + * [Usage](#usage) + * [DSN (Data Source Name)](#dsn-data-source-name) + * [Password](#password) + * [Protocol](#protocol) + * [Address](#address) + * [Parameters](#parameters) + * [Examples](#examples) + * [Connection pool and timeouts](#connection-pool-and-timeouts) + * [context.Context Support](#contextcontext-support) + * [ColumnType Support](#columntype-support) + * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) + * [time.Time support](#timetime-support) + * [Unicode support](#unicode-support) + * [Testing / Development](#testing--development) + * [License](#license) + +--------------------------------------- + +## Features + * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") + * Native Go implementation. No C-bindings, just pure Go + * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc) + * Automatic handling of broken connections + * Automatic Connection Pooling *(by database/sql package)* + * Supports queries larger than 16MB + * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support. + * Intelligent `LONG DATA` handling in prepared statements + * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support + * Optional `time.Time` parsing + * Optional placeholder interpolation + +## Requirements + * Go 1.7 or higher. We aim to support the 3 latest versions of Go. + * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) + +--------------------------------------- + +## Installation +Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell: +```bash +$ go get -u github.com/go-sql-driver/mysql +``` +Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`. + +## Usage +_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. 
You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then. + +Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: +```go +import "database/sql" +import _ "github.com/go-sql-driver/mysql" + +db, err := sql.Open("mysql", "user:password@/dbname") +``` + +[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples"). + + +### DSN (Data Source Name) + +The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets): +``` +[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] +``` + +A DSN in its fullest form: +``` +username:password@protocol(address)/dbname?param=value +``` + +Except for the databasename, all values are optional. So the minimal DSN is: +``` +/dbname +``` + +If you do not want to preselect a database, leave `dbname` empty: +``` +/ +``` +This has the same effect as an empty DSN string: +``` + +``` + +Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct. + +#### Password +Passwords can consist of any character. Escaping is **not** necessary. + +#### Protocol +See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available. +In general you should use an Unix domain socket if available and TCP otherwise for best performance. + +#### Address +For TCP and UDP networks, addresses have the form `host[:port]`. +If `port` is omitted, the default port will be used. +If `host` is a literal IPv6 address, it must be enclosed in square brackets. +The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. + +For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. + +#### Parameters +*Parameters are case-sensitive!* + +Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. + +##### `allowAllFiles` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. +[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) + +##### `allowCleartextPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. 
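+
+For example, a DSN that enables cleartext authentication only over a TLS-protected connection might look like this (host, credentials and database name are placeholders):
+```
+user:password@tcp(db.example.com:3306)/dbname?tls=true&allowCleartextPasswords=true
+```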
+ +##### `allowNativePasswords` + +``` +Type: bool +Valid Values: true, false +Default: true +``` +`allowNativePasswords=false` disallows the usage of MySQL native password method. + +##### `allowOldPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` +`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). + +##### `charset` + +``` +Type: string +Valid Values: +Default: none +``` + +Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). + +Usage of the `charset` parameter is discouraged because it issues additional queries to the server. +Unless you need the fallback behavior, please use `collation` instead. + +##### `collation` + +``` +Type: string +Valid Values: +Default: utf8_general_ci +``` + +Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. + +A list of valid charsets for a server is retrievable with `SHOW COLLATION`. + +##### `clientFoundRows` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. + +##### `columnsWithAlias` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: + +``` +SELECT u.id FROM users as u +``` + +will return `u.id` instead of just `id` if `columnsWithAlias=true`. + +##### `interpolateParams` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`. + +*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!* + +##### `loc` + +``` +Type: string +Valid Values: +Default: UTC +``` + +Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details. + +Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. + +Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. 
For example `US/Pacific` would be `loc=US%2FPacific`. + +##### `maxAllowedPacket` +``` +Type: decimal number +Default: 4194304 +``` + +Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*. + +##### `multiStatements` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded. + +When `multiStatements` is used, `?` parameters must only be used in the first statement. + +##### `parseTime` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string` + + +##### `readTimeout` + +``` +Type: duration +Default: 0 +``` + +I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. + +##### `rejectReadOnly` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + + +`rejectReadOnly=true` causes the driver to reject read-only connections. This +is for a possible race condition during an automatic failover, where the mysql +client gets connected to a read-only replica after the failover. + +Note that this should be a fairly rare case, as an automatic failover normally +happens when the primary is down, and the race condition shouldn't happen +unless it comes back up online as soon as the failover is kicked off. On the +other hand, when this happens, a MySQL application can get stuck on a +read-only connection until restarted. It is however fairly easy to reproduce, +for example, using a manual failover on AWS Aurora's MySQL-compatible cluster. + +If you are not relying on read-only transactions to reject writes that aren't +supposed to happen, setting this on some MySQL providers (such as AWS Aurora) +is safer for failovers. + +Note that ERROR 1290 can be returned for a `read-only` server and this option will +cause a retry for that error. However the same error number is used for some +other cases. You should ensure your application will never cause an ERROR 1290 +except for `read-only` mode when enabling this option. + + +##### `timeout` + +``` +Type: duration +Default: OS default +``` + +Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. + + +##### `tls` + +``` +Type: bool / string +Valid Values: true, false, skip-verify, +Default: false +``` + +`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). + + +##### `writeTimeout` + +``` +Type: duration +Default: 0 +``` + +I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. + + +##### System Variables + +Any other parameters are interpreted as system variables: + * `=`: `SET =` + * `=`: `SET =` + * `=%27%27`: `SET =''` + +Rules: +* The values for string variables must be quoted with `'`. 
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed! + (which implies values of string variables must be wrapped with `%27`). + +Examples: + * `autocommit=1`: `SET autocommit=1` + * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'` + * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'` + + +#### Examples +``` +user@unix(/path/to/socket)/dbname +``` + +``` +root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local +``` + +``` +user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true +``` + +Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html): +``` +user:password@/dbname?sql_mode=TRADITIONAL +``` + +TCP via IPv6: +``` +user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci +``` + +TCP on a remote host, e.g. Amazon RDS: +``` +id:password@tcp(your-amazonaws-uri.com:3306)/dbname +``` + +Google Cloud SQL on App Engine (First Generation MySQL Server): +``` +user@cloudsql(project-id:instance-name)/dbname +``` + +Google Cloud SQL on App Engine (Second Generation MySQL Server): +``` +user@cloudsql(project-id:regionname:instance-name)/dbname +``` + +TCP using default port (3306) on localhost: +``` +user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped +``` + +Use the default protocol (tcp) and host (localhost:3306): +``` +user:password@/dbname +``` + +No Database preselected: +``` +user:password@/ +``` + + +### Connection pool and timeouts +The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively. + +## `ColumnType` Support +This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. + +## `context.Context` Support +Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts. +See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details. + + +### `LOAD DATA LOCAL INFILE` support +For this feature you need direct access to the package. Therefore you must change the import path (no `_`): +```go +import "github.com/go-sql-driver/mysql" +``` + +Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). + +To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. 
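+
+A minimal sketch of the `io.Reader` variant (the handler name `data`, the table `mytable` and the open `db` handle are illustrative assumptions, not part of the driver):
+```go
+import (
+	"bytes"
+	"database/sql"
+	"io"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+func loadFromReader(db *sql.DB) error {
+	// Register a reader under the name "data"; it is then addressable as "Reader::data".
+	mysql.RegisterReaderHandler("data", func() io.Reader {
+		return bytes.NewReader([]byte("1\tfoo\n2\tbar\n"))
+	})
+	defer mysql.DeregisterReaderHandler("data")
+
+	// "mytable" is a placeholder table whose columns match the tab-separated data above.
+	_, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE mytable")
+	return err
+}
+```
+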
Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore. + +See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. + + +### `time.Time` support +The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program. + +However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter. + +**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). + +Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. + + +### Unicode support +Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default. + +Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. + +Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. + +See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. + +## Testing / Development +To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. + +Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. +If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). + +See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. + +--------------------------------------- + +## License +Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +Mozilla summarizes the license scope as follows: +> MPL: The copyleft applies to any files containing MPLed code. + + +That means: + * You can **use** the **unchanged** source code both in private and commercially. + * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0). + * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**. + +Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license. + +You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE). 
+ +![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") + diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go new file mode 100644 index 0000000..be41f2e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/appengine.go @@ -0,0 +1,19 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build appengine + +package mysql + +import ( + "google.golang.org/appengine/cloudsql" +) + +func init() { + RegisterDial("cloudsql", cloudsql.Dial) +} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 0000000..2001fea --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,147 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "io" + "net" + "time" +) + +const defaultBufSize = 4096 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. +type buffer struct { + buf []byte + nc net.Conn + idx int + length int + timeout time.Duration +} + +func newBuffer(nc net.Conn) buffer { + var b [defaultBufSize]byte + return buffer{ + buf: b[:], + nc: nc, + } +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + n := b.length + + // move existing data to the beginning + if n > 0 && b.idx > 0 { + copy(b.buf[0:n], b.buf[b.idx:]) + } + + // grow buffer if necessary + // TODO: let the buffer shrink again at some point + // Maybe keep the org buf slice and swap back? + if need > len(b.buf) { + // Round up to the next multiple of the default size + newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + copy(newBuf, b.buf) + b.buf = newBuf + } + + b.idx = 0 + + for { + if b.timeout > 0 { + if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil { + return err + } + } + + nn, err := b.nc.Read(b.buf[n:]) + n += nn + + switch err { + case nil: + if n < need { + continue + } + b.length = n + return nil + + case io.EOF: + if n >= need { + b.length = n + return nil + } + return io.ErrUnexpectedEOF + + default: + return err + } + } +} + +// returns next N bytes from buffer. +// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// returns a buffer with the requested size. 
+// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeBuffer(length int) []byte { + if b.length > 0 { + return nil + } + + // test (cheap) general case first + if length <= defaultBufSize || length <= cap(b.buf) { + return b.buf[:length] + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf + } + return make([]byte, length) +} + +// shortcut which can be used if the requested buffer is guaranteed to be +// smaller than defaultBufSize +// Only one buffer (total) can be used at a time. +func (b *buffer) takeSmallBuffer(length int) []byte { + if b.length == 0 { + return b.buf[:length] + } + return nil +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeCompleteBuffer() []byte { + if b.length == 0 { + return b.buf + } + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go new file mode 100644 index 0000000..82079cf --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/collations.go @@ -0,0 +1,250 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const defaultCollation = "utf8_general_ci" + +// A list of available collations mapped to the internal ID. +// To update this map use the following MySQL query: +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS +var collations = map[string]byte{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + "ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + "utf16_general_ci": 54, + "utf16_bin": 55, + "utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + "utf32_general_ci": 60, + "utf32_bin": 61, + "utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, 
+ "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "latin2_bin": 77, + "latin5_bin": 78, + "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + "ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + "utf16_unicode_ci": 101, + "utf16_icelandic_ci": 102, + "utf16_latvian_ci": 103, + "utf16_romanian_ci": 104, + "utf16_slovenian_ci": 105, + "utf16_polish_ci": 106, + "utf16_estonian_ci": 107, + "utf16_spanish_ci": 108, + "utf16_swedish_ci": 109, + "utf16_turkish_ci": 110, + "utf16_czech_ci": 111, + "utf16_danish_ci": 112, + "utf16_lithuanian_ci": 113, + "utf16_slovak_ci": 114, + "utf16_spanish2_ci": 115, + "utf16_roman_ci": 116, + "utf16_persian_ci": 117, + "utf16_esperanto_ci": 118, + "utf16_hungarian_ci": 119, + "utf16_sinhala_ci": 120, + "utf16_german2_ci": 121, + "utf16_croatian_ci": 122, + "utf16_unicode_520_ci": 123, + "utf16_vietnamese_ci": 124, + "ucs2_unicode_ci": 128, + "ucs2_icelandic_ci": 129, + "ucs2_latvian_ci": 130, + "ucs2_romanian_ci": 131, + "ucs2_slovenian_ci": 132, + "ucs2_polish_ci": 133, + "ucs2_estonian_ci": 134, + "ucs2_spanish_ci": 135, + "ucs2_swedish_ci": 136, + "ucs2_turkish_ci": 137, + "ucs2_czech_ci": 138, + "ucs2_danish_ci": 139, + "ucs2_lithuanian_ci": 140, + "ucs2_slovak_ci": 141, + "ucs2_spanish2_ci": 142, + "ucs2_roman_ci": 143, + "ucs2_persian_ci": 144, + "ucs2_esperanto_ci": 145, + "ucs2_hungarian_ci": 146, + "ucs2_sinhala_ci": 147, + "ucs2_german2_ci": 148, + "ucs2_croatian_ci": 149, + "ucs2_unicode_520_ci": 150, + "ucs2_vietnamese_ci": 151, + "ucs2_general_mysql500_ci": 159, + "utf32_unicode_ci": 160, + "utf32_icelandic_ci": 161, + "utf32_latvian_ci": 162, + "utf32_romanian_ci": 163, + "utf32_slovenian_ci": 164, + "utf32_polish_ci": 165, + "utf32_estonian_ci": 166, + "utf32_spanish_ci": 167, + "utf32_swedish_ci": 168, + "utf32_turkish_ci": 169, + "utf32_czech_ci": 170, + "utf32_danish_ci": 171, + "utf32_lithuanian_ci": 172, + "utf32_slovak_ci": 173, + "utf32_spanish2_ci": 174, + "utf32_roman_ci": 175, + "utf32_persian_ci": 176, + "utf32_esperanto_ci": 177, + "utf32_hungarian_ci": 178, + "utf32_sinhala_ci": 179, + "utf32_german2_ci": 180, + "utf32_croatian_ci": 181, + "utf32_unicode_520_ci": 182, + "utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + "utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + 
"utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, +} + +// A blacklist of collations which is unsafe to interpolate parameters. +// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. +var unsafeCollations = map[string]bool{ + "big5_chinese_ci": true, + "sjis_japanese_ci": true, + "gbk_chinese_ci": true, + "big5_bin": true, + "gb2312_bin": true, + "gbk_bin": true, + "sjis_bin": true, + "cp932_japanese_ci": true, + "cp932_bin": true, +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 0000000..e570614 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,461 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" + "net" + "strconv" + "strings" + "time" +) + +// a copy of context.Context for Go 1.7 and earlier +type mysqlContext interface { + Done() <-chan struct{} + Err() error + + // defined in context.Context, but not used in this driver: + // Deadline() (deadline time.Time, ok bool) + // Value(key interface{}) interface{} +} + +type mysqlConn struct { + buf buffer + netConn net.Conn + affectedRows uint64 + insertId uint64 + cfg *Config + maxAllowedPacket int + maxWriteSize int + writeTimeout time.Duration + flags clientFlag + status statusFlag + sequence uint8 + parseTime bool + + // for context support (Go 1.8+) + watching bool + watcher chan<- mysqlContext + closech chan struct{} + finished chan<- struct{} + canceled atomicError // set non-nil if conn is canceled + closed atomicBool // set when conn is closed, before closech is closed +} + +// Handles parameters set in DSN after the connection is established +func (mc *mysqlConn) handleParams() (err error) { + for param, val := range mc.cfg.Params { + switch param { + // Charset + case "charset": + charsets := strings.Split(val, ",") + for i := range charsets { + // ignore errors here - a charset may not exist + err = mc.exec("SET NAMES " + charsets[i]) + if err == nil { + break + } + } + if err != nil { + return + } + + // System Vars + default: + err = mc.exec("SET " + param + "=" + val + "") + if err != nil { + return + } + } + } + + return +} + +func (mc *mysqlConn) markBadConn(err error) error { + if mc == nil { + return err + } + if err != errBadConnNoWrite { + return err + } + return driver.ErrBadConn +} + +func (mc *mysqlConn) Begin() (driver.Tx, error) { + return mc.begin(false) +} + +func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + var q string + if readOnly { + q = "START TRANSACTION READ ONLY" + } else { + q = "START TRANSACTION" + } + err := mc.exec(q) + if err == nil { + return 
&mysqlTx{mc}, err + } + return nil, mc.markBadConn(err) +} + +func (mc *mysqlConn) Close() (err error) { + // Makes Close idempotent + if !mc.closed.IsSet() { + err = mc.writeCommandPacket(comQuit) + } + + mc.cleanup() + + return +} + +// Closes the network connection and unsets internal variables. Do not call this +// function after successfully authentication, call Close instead. This function +// is called before auth or on auth failure because MySQL will have already +// closed the network connection. +func (mc *mysqlConn) cleanup() { + if !mc.closed.TrySet(true) { + return + } + + // Makes cleanup idempotent + close(mc.closech) + if mc.netConn == nil { + return + } + if err := mc.netConn.Close(); err != nil { + errLog.Print(err) + } +} + +func (mc *mysqlConn) error() error { + if mc.closed.IsSet() { + if err := mc.canceled.Value(); err != nil { + return err + } + return ErrInvalidConn + } + return nil +} + +func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := mc.writeCommandPacketStr(comStmtPrepare, query) + if err != nil { + return nil, mc.markBadConn(err) + } + + stmt := &mysqlStmt{ + mc: mc, + } + + // Read Result + columnCount, err := stmt.readPrepareResultPacket() + if err == nil { + if stmt.paramCount > 0 { + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if columnCount > 0 { + err = mc.readUntilEOF() + } + } + + return stmt, err +} + +func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { + // Number of ? should be same to len(args) + if strings.Count(query, "?") != len(args) { + return "", driver.ErrSkip + } + + buf := mc.buf.takeCompleteBuffer() + if buf == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return "", ErrInvalidConn + } + buf = buf[:0] + argPos := 0 + + for i := 0; i < len(query); i++ { + q := strings.IndexByte(query[i:], '?') + if q == -1 { + buf = append(buf, query[i:]...) + break + } + buf = append(buf, query[i:i+q]...) + i += q + + arg := args[argPos] + argPos++ + + if arg == nil { + buf = append(buf, "NULL"...) + continue + } + + switch v := arg.(type) { + case int64: + buf = strconv.AppendInt(buf, v, 10) + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case bool: + if v { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) + } else { + v := v.In(mc.cfg.Loc) + v = v.Add(time.Nanosecond * 500) // To round under microsecond + year := v.Year() + year100 := year / 100 + year1 := year % 100 + month := v.Month() + day := v.Day() + hour := v.Hour() + minute := v.Minute() + second := v.Second() + micro := v.Nanosecond() / 1000 + + buf = append(buf, []byte{ + '\'', + digits10[year100], digits01[year100], + digits10[year1], digits01[year1], + '-', + digits10[month], digits01[month], + '-', + digits10[day], digits01[day], + ' ', + digits10[hour], digits01[hour], + ':', + digits10[minute], digits01[minute], + ':', + digits10[second], digits01[second], + }...) + + if micro != 0 { + micro10000 := micro / 10000 + micro100 := micro / 100 % 100 + micro1 := micro % 100 + buf = append(buf, []byte{ + '.', + digits10[micro10000], digits01[micro10000], + digits10[micro100], digits01[micro100], + digits10[micro1], digits01[micro1], + }...) 
+ } + buf = append(buf, '\'') + } + case []byte: + if v == nil { + buf = append(buf, "NULL"...) + } else { + buf = append(buf, "_binary'"...) + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + if len(buf)+4 > mc.maxAllowedPacket { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, mc.markBadConn(err) +} + +// Internal function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + if err := mc.writeCommandPacketStr(comQuery, query); err != nil { + return mc.markBadConn(err) + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } + + return mc.discardResults() +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + return mc.query(query, args) +} + +func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen == 0 { + rows.rs.done = true + + switch err := rows.NextResultSet(); err { + case nil, io.EOF: + return rows, nil + default: + return nil, err + } + } + + // Columns + rows.rs.columns, err = mc.readColumns(resLen) + return rows, err + } + } + return nil, mc.markBadConn(err) +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}} + + if resLen > 0 { + // Columns + if err 
:= mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} + +// finish is called when the query has canceled. +func (mc *mysqlConn) cancel(err error) { + mc.canceled.Set(err) + mc.cleanup() +} + +// finish is called when the query has succeeded. +func (mc *mysqlConn) finish() { + if !mc.watching || mc.finished == nil { + return + } + select { + case mc.finished <- struct{}{}: + mc.watching = false + case <-mc.closech: + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection_go18.go b/vendor/github.com/go-sql-driver/mysql/connection_go18.go new file mode 100644 index 0000000..1306b70 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection_go18.go @@ -0,0 +1,202 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build go1.8 + +package mysql + +import ( + "context" + "database/sql" + "database/sql/driver" +) + +// Ping implements driver.Pinger interface +func (mc *mysqlConn) Ping(ctx context.Context) error { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + if err := mc.watchCancel(ctx); err != nil { + return err + } + defer mc.finish() + + if err := mc.writeCommandPacket(comPing); err != nil { + return err + } + if _, err := mc.readResultOK(); err != nil { + return err + } + + return nil +} + +// BeginTx implements driver.ConnBeginTx interface +func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault { + level, err := mapIsolationLevel(opts.Isolation) + if err != nil { + return nil, err + } + err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level) + if err != nil { + return nil, err + } + } + + return mc.begin(opts.ReadOnly) +} + +func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := mc.query(query, dargs) + if err != nil { + mc.finish() + return nil, err + } + rows.finish = mc.finish + return rows, err +} + +func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + return mc.Exec(query, dargs) +} + +func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + stmt, err := mc.Prepare(query) + mc.finish() + if err != nil { + return nil, err + } + + select { + default: + case <-ctx.Done(): + stmt.Close() + return nil, ctx.Err() + } + return stmt, nil +} + +func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + 
return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := stmt.query(dargs) + if err != nil { + stmt.mc.finish() + return nil, err + } + rows.finish = stmt.mc.finish + return rows, err +} + +func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + defer stmt.mc.finish() + + return stmt.Exec(dargs) +} + +func (mc *mysqlConn) watchCancel(ctx context.Context) error { + if mc.watching { + // Reach here if canceled, + // so the connection is already invalid + mc.cleanup() + return nil + } + if ctx.Done() == nil { + return nil + } + + mc.watching = true + select { + default: + case <-ctx.Done(): + return ctx.Err() + } + if mc.watcher == nil { + return nil + } + + mc.watcher <- ctx + + return nil +} + +func (mc *mysqlConn) startWatcher() { + watcher := make(chan mysqlContext, 1) + mc.watcher = watcher + finished := make(chan struct{}) + mc.finished = finished + go func() { + for { + var ctx mysqlContext + select { + case ctx = <-watcher: + case <-mc.closech: + return + } + + select { + case <-ctx.Done(): + mc.cancel(ctx.Err()) + case <-finished: + case <-mc.closech: + return + } + } + }() +} + +func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) { + nv.Value, err = converter{}.ConvertValue(nv.Value) + return +} diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 0000000..4a19ca5 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,166 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
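+
+// The constants in this file mirror values from the MySQL client/server
+// protocol: packet indicator bytes, capability bits, command bytes, column
+// (field) types and status flags. The flag types are plain bit masks and are
+// combined and tested with bitwise operations, e.g. (illustrative only, the
+// variable name flags is not part of this file):
+//
+//  flags := clientProtocol41 | clientSecureConn | clientTransactions
+//  if flags&clientTransactions != 0 {
+//      // transaction support was requested
+//  }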
+ +package mysql + +const ( + defaultMaxAllowedPacket = 4 << 20 // 4 MiB + minProtocolVersion = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05.999999" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults + clientPSMultiResults + clientPluginAuth + clientConnectAttrs + clientPluginAuthLenEncClientData + clientCanHandleExpiredPasswords + clientSessionTrack + clientDeprecateEOF +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegisterSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +type fieldType byte + +const ( + fieldTypeDecimal fieldType = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + fieldTypeBit +) +const ( + fieldTypeJSON fieldType = iota + 0xf5 + fieldTypeNewDecimal + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +// http://dev.mysql.com/doc/internals/en/status-flags.html +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 0000000..d42ce7a --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,193 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// Package mysql provides a MySQL driver for Go's database/sql package. 
+// +// The driver should be used via the database/sql package: +// +// import "database/sql" +// import _ "github.com/go-sql-driver/mysql" +// +// db, err := sql.Open("mysql", "user:password@/dbname") +// +// See https://github.com/go-sql-driver/mysql#usage for details +package mysql + +import ( + "database/sql" + "database/sql/driver" + "net" +) + +// watcher interface is used for context support (From Go 1.8) +type watcher interface { + startWatcher() +} + +// MySQLDriver is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. +type MySQLDriver struct{} + +// DialFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDial +type DialFunc func(addr string) (net.Conn, error) + +var dials map[string]DialFunc + +// RegisterDial registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// addr is passed as a parameter to the dial function. +func RegisterDial(net string, dial DialFunc) { + if dials == nil { + dials = make(map[string]DialFunc) + } + dials[net] = dial +} + +// Open new Connection. +// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how +// the DSN string is formated +func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxAllowedPacket: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + closech: make(chan struct{}), + } + mc.cfg, err = ParseDSN(dsn) + if err != nil { + return nil, err + } + mc.parseTime = mc.cfg.ParseTime + + // Connect to Server + if dial, ok := dials[mc.cfg.Net]; ok { + mc.netConn, err = dial(mc.cfg.Addr) + } else { + nd := net.Dialer{Timeout: mc.cfg.Timeout} + mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr) + } + if err != nil { + return nil, err + } + + // Enable TCP Keepalives on TCP connections + if tc, ok := mc.netConn.(*net.TCPConn); ok { + if err := tc.SetKeepAlive(true); err != nil { + // Don't send COM_QUIT before handshake. + mc.netConn.Close() + mc.netConn = nil + return nil, err + } + } + + // Call startWatcher for context support (From Go 1.8) + if s, ok := interface{}(mc).(watcher); ok { + s.startWatcher() + } + + mc.buf = newBuffer(mc.netConn) + + // Set I/O timeouts + mc.buf.timeout = mc.cfg.ReadTimeout + mc.writeTimeout = mc.cfg.WriteTimeout + + // Reading Handshake Initialization Packet + cipher, err := mc.readInitPacket() + if err != nil { + mc.cleanup() + return nil, err + } + + // Send Client Authentication Packet + if err = mc.writeAuthPacket(cipher); err != nil { + mc.cleanup() + return nil, err + } + + // Handle response to auth packet, switch methods if possible + if err = handleAuthResult(mc, cipher); err != nil { + // Authentication failed and MySQL has already closed the connection + // (https://dev.mysql.com/doc/internals/en/authentication-fails.html). + // Do not send COM_QUIT, just cleanup and return the error. 
+ mc.cleanup() + return nil, err + } + + if mc.cfg.MaxAllowedPacket > 0 { + mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket + } else { + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxAllowedPacket = stringToInt(maxap) - 1 + } + if mc.maxAllowedPacket < maxPacketSize { + mc.maxWriteSize = mc.maxAllowedPacket + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +func handleAuthResult(mc *mysqlConn, oldCipher []byte) error { + // Read Result Packet + cipher, err := mc.readResultOK() + if err == nil { + return nil // auth successful + } + + if mc.cfg == nil { + return err // auth failed and retry not possible + } + + // Retry auth if configured to do so. + if mc.cfg.AllowOldPasswords && err == ErrOldPassword { + // Retry with old authentication method. Note: there are edge cases + // where this should work but doesn't; this is currently "wontfix": + // https://github.com/go-sql-driver/mysql/issues/184 + + // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is + // sent and we have to keep using the cipher sent in the init packet. + if cipher == nil { + cipher = oldCipher + } + + if err = mc.writeOldAuthPacket(cipher); err != nil { + return err + } + _, err = mc.readResultOK() + } else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword { + // Retry with clear text password for + // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html + // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html + if err = mc.writeClearAuthPacket(); err != nil { + return err + } + _, err = mc.readResultOK() + } else if mc.cfg.AllowNativePasswords && err == ErrNativePassword { + if err = mc.writeNativeAuthPacket(cipher); err != nil { + return err + } + _, err = mc.readResultOK() + } + return err +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go new file mode 100644 index 0000000..47eab69 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -0,0 +1,584 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "sort" + "strconv" + "strings" + "time" +) + +var ( + errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name") + errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations") +) + +// Config is a configuration parsed from a DSN string. +// If a new Config is created instead of being parsed from a DSN string, +// the NewConfig function should be used, which sets default values. 
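+//
+// A minimal sketch of building a Config by hand and turning it into a DSN
+// (the field values below are placeholders, not defaults of this package):
+//
+//  cfg := mysql.NewConfig()
+//  cfg.User = "user"
+//  cfg.Passwd = "password"
+//  cfg.Net = "tcp"
+//  cfg.Addr = "127.0.0.1:3306"
+//  cfg.DBName = "dbname"
+//  cfg.ParseTime = true
+//  dsn := cfg.FormatDSN()
+//  // dsn == "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true"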
+type Config struct { + User string // Username + Passwd string // Password (requires User) + Net string // Network type + Addr string // Network address (requires Net) + DBName string // Database name + Params map[string]string // Connection parameters + Collation string // Connection collation + Loc *time.Location // Location for time.Time values + MaxAllowedPacket int // Max packet size allowed + TLSConfig string // TLS configuration name + tls *tls.Config // TLS configuration + Timeout time.Duration // Dial timeout + ReadTimeout time.Duration // I/O read timeout + WriteTimeout time.Duration // I/O write timeout + + AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE + AllowCleartextPasswords bool // Allows the cleartext client side plugin + AllowNativePasswords bool // Allows the native password authentication method + AllowOldPasswords bool // Allows the old insecure password method + ClientFoundRows bool // Return number of matching rows instead of rows changed + ColumnsWithAlias bool // Prepend table alias to column names + InterpolateParams bool // Interpolate placeholders into query string + MultiStatements bool // Allow multiple statements in one query + ParseTime bool // Parse time values to time.Time + RejectReadOnly bool // Reject read-only connections +} + +// NewConfig creates a new Config and sets default values. +func NewConfig() *Config { + return &Config{ + Collation: defaultCollation, + Loc: time.UTC, + MaxAllowedPacket: defaultMaxAllowedPacket, + AllowNativePasswords: true, + } +} + +func (cfg *Config) normalize() error { + if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { + return errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.Net == "" { + cfg.Net = "tcp" + } + + // Set default address if empty + if cfg.Addr == "" { + switch cfg.Net { + case "tcp": + cfg.Addr = "127.0.0.1:3306" + case "unix": + cfg.Addr = "/tmp/mysql.sock" + default: + return errors.New("default addr for network '" + cfg.Net + "' unknown") + } + + } else if cfg.Net == "tcp" { + cfg.Addr = ensureHavePort(cfg.Addr) + } + + if cfg.tls != nil { + if cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.Addr) + if err == nil { + cfg.tls.ServerName = host + } + } + } + + return nil +} + +// FormatDSN formats the given Config into a DSN string which can be passed to +// the driver. 
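+//
+// The returned string has the general form
+//
+//  [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+//
+// and, roughly speaking, only includes parameters whose values differ from
+// their defaults.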
+func (cfg *Config) FormatDSN() string { + var buf bytes.Buffer + + // [username[:password]@] + if len(cfg.User) > 0 { + buf.WriteString(cfg.User) + if len(cfg.Passwd) > 0 { + buf.WriteByte(':') + buf.WriteString(cfg.Passwd) + } + buf.WriteByte('@') + } + + // [protocol[(address)]] + if len(cfg.Net) > 0 { + buf.WriteString(cfg.Net) + if len(cfg.Addr) > 0 { + buf.WriteByte('(') + buf.WriteString(cfg.Addr) + buf.WriteByte(')') + } + } + + // /dbname + buf.WriteByte('/') + buf.WriteString(cfg.DBName) + + // [?param1=value1&...¶mN=valueN] + hasParam := false + + if cfg.AllowAllFiles { + hasParam = true + buf.WriteString("?allowAllFiles=true") + } + + if cfg.AllowCleartextPasswords { + if hasParam { + buf.WriteString("&allowCleartextPasswords=true") + } else { + hasParam = true + buf.WriteString("?allowCleartextPasswords=true") + } + } + + if !cfg.AllowNativePasswords { + if hasParam { + buf.WriteString("&allowNativePasswords=false") + } else { + hasParam = true + buf.WriteString("?allowNativePasswords=false") + } + } + + if cfg.AllowOldPasswords { + if hasParam { + buf.WriteString("&allowOldPasswords=true") + } else { + hasParam = true + buf.WriteString("?allowOldPasswords=true") + } + } + + if cfg.ClientFoundRows { + if hasParam { + buf.WriteString("&clientFoundRows=true") + } else { + hasParam = true + buf.WriteString("?clientFoundRows=true") + } + } + + if col := cfg.Collation; col != defaultCollation && len(col) > 0 { + if hasParam { + buf.WriteString("&collation=") + } else { + hasParam = true + buf.WriteString("?collation=") + } + buf.WriteString(col) + } + + if cfg.ColumnsWithAlias { + if hasParam { + buf.WriteString("&columnsWithAlias=true") + } else { + hasParam = true + buf.WriteString("?columnsWithAlias=true") + } + } + + if cfg.InterpolateParams { + if hasParam { + buf.WriteString("&interpolateParams=true") + } else { + hasParam = true + buf.WriteString("?interpolateParams=true") + } + } + + if cfg.Loc != time.UTC && cfg.Loc != nil { + if hasParam { + buf.WriteString("&loc=") + } else { + hasParam = true + buf.WriteString("?loc=") + } + buf.WriteString(url.QueryEscape(cfg.Loc.String())) + } + + if cfg.MultiStatements { + if hasParam { + buf.WriteString("&multiStatements=true") + } else { + hasParam = true + buf.WriteString("?multiStatements=true") + } + } + + if cfg.ParseTime { + if hasParam { + buf.WriteString("&parseTime=true") + } else { + hasParam = true + buf.WriteString("?parseTime=true") + } + } + + if cfg.ReadTimeout > 0 { + if hasParam { + buf.WriteString("&readTimeout=") + } else { + hasParam = true + buf.WriteString("?readTimeout=") + } + buf.WriteString(cfg.ReadTimeout.String()) + } + + if cfg.RejectReadOnly { + if hasParam { + buf.WriteString("&rejectReadOnly=true") + } else { + hasParam = true + buf.WriteString("?rejectReadOnly=true") + } + } + + if cfg.Timeout > 0 { + if hasParam { + buf.WriteString("&timeout=") + } else { + hasParam = true + buf.WriteString("?timeout=") + } + buf.WriteString(cfg.Timeout.String()) + } + + if len(cfg.TLSConfig) > 0 { + if hasParam { + buf.WriteString("&tls=") + } else { + hasParam = true + buf.WriteString("?tls=") + } + buf.WriteString(url.QueryEscape(cfg.TLSConfig)) + } + + if cfg.WriteTimeout > 0 { + if hasParam { + buf.WriteString("&writeTimeout=") + } else { + hasParam = true + buf.WriteString("?writeTimeout=") + } + buf.WriteString(cfg.WriteTimeout.String()) + } + + if cfg.MaxAllowedPacket != defaultMaxAllowedPacket { + if hasParam { + buf.WriteString("&maxAllowedPacket=") + } else { + hasParam = true + 
buf.WriteString("?maxAllowedPacket=") + } + buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket)) + + } + + // other params + if cfg.Params != nil { + var params []string + for param := range cfg.Params { + params = append(params, param) + } + sort.Strings(params) + for _, param := range params { + if hasParam { + buf.WriteByte('&') + } else { + hasParam = true + buf.WriteByte('?') + } + + buf.WriteString(param) + buf.WriteByte('=') + buf.WriteString(url.QueryEscape(cfg.Params[param])) + } + } + + return buf.String() +} + +// ParseDSN parses the DSN string to a Config +func ParseDSN(dsn string) (cfg *Config, err error) { + // New config with some default values + cfg = NewConfig() + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + foundSlash := false + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + foundSlash = true + var j, k int + + // left part is empty if i <= 0 + if i > 0 { + // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; k++ { + if dsn[k] == ':' { + cfg.Passwd = dsn[k+1 : j] + break + } + } + cfg.User = dsn[:k] + + break + } + } + + // [protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an address is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.Addr = dsn[k+1 : i-1] + break + } + } + cfg.Net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...¶mN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?' 
{ + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.DBName = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + if err = cfg.normalize(); err != nil { + return nil, err + } + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *Config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + // Disable INFILE whitelist / enable all files + case "allowAllFiles": + var isBool bool + cfg.AllowAllFiles, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use cleartext authentication mode (MySQL 5.5.10+) + case "allowCleartextPasswords": + var isBool bool + cfg.AllowCleartextPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use native password authentication + case "allowNativePasswords": + var isBool bool + cfg.AllowNativePasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use old authentication mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.AllowOldPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + cfg.ClientFoundRows, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Collation + case "collation": + cfg.Collation = value + break + + case "columnsWithAlias": + var isBool bool + cfg.ColumnsWithAlias, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Compression + case "compress": + return errors.New("compression not implemented yet") + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.InterpolateParams, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.Loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // multiple statements in one query + case "multiStatements": + var isBool bool + cfg.MultiStatements, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // time.Time parsing + case "parseTime": + var isBool bool + cfg.ParseTime, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // I/O read Timeout + case "readTimeout": + cfg.ReadTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // Reject read-only connections + case "rejectReadOnly": + var isBool bool + cfg.RejectReadOnly, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Strict mode + case "strict": + panic("strict mode has been removed. 
See https://github.com/go-sql-driver/mysql/wiki/strict-mode") + + // Dial Timeout + case "timeout": + cfg.Timeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.TLSConfig = "true" + cfg.tls = &tls.Config{} + } else { + cfg.TLSConfig = "false" + } + } else if vl := strings.ToLower(value); vl == "skip-verify" { + cfg.TLSConfig = vl + cfg.tls = &tls.Config{InsecureSkipVerify: true} + } else { + name, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid value for TLS config name: %v", err) + } + + if tlsConfig := getTLSConfigClone(name); tlsConfig != nil { + cfg.TLSConfig = name + cfg.tls = tlsConfig + } else { + return errors.New("invalid value / unknown config name: " + name) + } + } + + // I/O write Timeout + case "writeTimeout": + cfg.WriteTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + case "maxAllowedPacket": + cfg.MaxAllowedPacket, err = strconv.Atoi(value) + if err != nil { + return + } + default: + // lazy init + if cfg.Params == nil { + cfg.Params = make(map[string]string) + } + + if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} + +func ensureHavePort(addr string) string { + if _, _, err := net.SplitHostPort(addr); err != nil { + return net.JoinHostPort(addr, "3306") + } + return addr +} diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 0000000..760782f --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,65 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "errors" + "fmt" + "log" + "os" +) + +// Various errors the driver might return. Can change between driver versions. +var ( + ErrInvalidConn = errors.New("invalid connection") + ErrMalformPkt = errors.New("malformed packet") + ErrNoTLS = errors.New("TLS requested but server does not support TLS") + ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN") + ErrNativePassword = errors.New("this user requires mysql native password authentication.") + ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + ErrUnknownPlugin = errors.New("this authentication plugin is not supported") + ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+") + ErrPktSync = errors.New("commands out of sync. You can't run this command now") + ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?") + ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server") + ErrBusyBuffer = errors.New("busy buffer") + + // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet. 
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn + // to trigger a resend. + // See https://github.com/go-sql-driver/mysql/pull/302 + errBadConnNoWrite = errors.New("bad connection") +) + +var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) + +// Logger is used to log critical error messages. +type Logger interface { + Print(v ...interface{}) +} + +// SetLogger is used to set the logger for critical errors. +// The initial logger is os.Stderr. +func SetLogger(logger Logger) error { + if logger == nil { + return errors.New("logger is nil") + } + errLog = logger + return nil +} + +// MySQLError is an error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go new file mode 100644 index 0000000..cded986 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/fields.go @@ -0,0 +1,140 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql" + "reflect" +) + +var typeDatabaseName = map[fieldType]string{ + fieldTypeBit: "BIT", + fieldTypeBLOB: "BLOB", + fieldTypeDate: "DATE", + fieldTypeDateTime: "DATETIME", + fieldTypeDecimal: "DECIMAL", + fieldTypeDouble: "DOUBLE", + fieldTypeEnum: "ENUM", + fieldTypeFloat: "FLOAT", + fieldTypeGeometry: "GEOMETRY", + fieldTypeInt24: "MEDIUMINT", + fieldTypeJSON: "JSON", + fieldTypeLong: "INT", + fieldTypeLongBLOB: "LONGBLOB", + fieldTypeLongLong: "BIGINT", + fieldTypeMediumBLOB: "MEDIUMBLOB", + fieldTypeNewDate: "DATE", + fieldTypeNewDecimal: "DECIMAL", + fieldTypeNULL: "NULL", + fieldTypeSet: "SET", + fieldTypeShort: "SMALLINT", + fieldTypeString: "CHAR", + fieldTypeTime: "TIME", + fieldTypeTimestamp: "TIMESTAMP", + fieldTypeTiny: "TINYINT", + fieldTypeTinyBLOB: "TINYBLOB", + fieldTypeVarChar: "VARCHAR", + fieldTypeVarString: "VARCHAR", + fieldTypeYear: "YEAR", +} + +var ( + scanTypeFloat32 = reflect.TypeOf(float32(0)) + scanTypeFloat64 = reflect.TypeOf(float64(0)) + scanTypeInt8 = reflect.TypeOf(int8(0)) + scanTypeInt16 = reflect.TypeOf(int16(0)) + scanTypeInt32 = reflect.TypeOf(int32(0)) + scanTypeInt64 = reflect.TypeOf(int64(0)) + scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{}) + scanTypeNullInt = reflect.TypeOf(sql.NullInt64{}) + scanTypeNullTime = reflect.TypeOf(NullTime{}) + scanTypeUint8 = reflect.TypeOf(uint8(0)) + scanTypeUint16 = reflect.TypeOf(uint16(0)) + scanTypeUint32 = reflect.TypeOf(uint32(0)) + scanTypeUint64 = reflect.TypeOf(uint64(0)) + scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{}) + scanTypeUnknown = reflect.TypeOf(new(interface{})) +) + +type mysqlField struct { + tableName string + name string + length uint32 + flags fieldFlag + fieldType fieldType + decimals byte +} + +func (mf *mysqlField) scanType() reflect.Type { + switch mf.fieldType { + case fieldTypeTiny: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint8 + } + return scanTypeInt8 + } + return scanTypeNullInt + + case fieldTypeShort, 
fieldTypeYear: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint16 + } + return scanTypeInt16 + } + return scanTypeNullInt + + case fieldTypeInt24, fieldTypeLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint32 + } + return scanTypeInt32 + } + return scanTypeNullInt + + case fieldTypeLongLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint64 + } + return scanTypeInt64 + } + return scanTypeNullInt + + case fieldTypeFloat: + if mf.flags&flagNotNULL != 0 { + return scanTypeFloat32 + } + return scanTypeNullFloat + + case fieldTypeDouble: + if mf.flags&flagNotNULL != 0 { + return scanTypeFloat64 + } + return scanTypeNullFloat + + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON, + fieldTypeTime: + return scanTypeRawBytes + + case fieldTypeDate, fieldTypeNewDate, + fieldTypeTimestamp, fieldTypeDateTime: + // NullTime is always returned for more consistent behavior as it can + // handle both cases of parseTime regardless if the field is nullable. + return scanTypeNullTime + + default: + return scanTypeUnknown + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 0000000..4020f91 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,183 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "fmt" + "io" + "os" + "strings" + "sync" +) + +var ( + fileRegister map[string]bool + fileRegisterLock sync.RWMutex + readerRegister map[string]func() io.Reader + readerRegisterLock sync.RWMutex +) + +// RegisterLocalFile adds the given file to the file whitelist, +// so that it can be used by "LOAD DATA LOCAL INFILE ". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterLocalFile(filePath string) { + fileRegisterLock.Lock() + // lazy map init + if fileRegister == nil { + fileRegister = make(map[string]bool) + } + + fileRegister[strings.Trim(filePath, `"`)] = true + fileRegisterLock.Unlock() +} + +// DeregisterLocalFile removes the given filepath from the whitelist. +func DeregisterLocalFile(filePath string) { + fileRegisterLock.Lock() + delete(fileRegister, strings.Trim(filePath, `"`)) + fileRegisterLock.Unlock() +} + +// RegisterReaderHandler registers a handler function which is used +// to receive a io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". +// If the handler returns a io.ReadCloser Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... 
// Open Reader here +// return csvReader +// }) +// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterReaderHandler(name string, handler func() io.Reader) { + readerRegisterLock.Lock() + // lazy map init + if readerRegister == nil { + readerRegister = make(map[string]func() io.Reader) + } + + readerRegister[name] = handler + readerRegisterLock.Unlock() +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. +func DeregisterReaderHandler(name string) { + readerRegisterLock.Lock() + delete(readerRegister, name) + readerRegisterLock.Unlock() +} + +func deferredClose(err *error, closer io.Closer) { + closeErr := closer.Close() + if *err == nil { + *err = closeErr + } +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + var data []byte + packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP + if mc.maxWriteSize < packetSize { + packetSize = mc.maxWriteSize + } + + if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader + // The server might return an an absolute path. See issue #355. + name = name[idx+8:] + + readerRegisterLock.RLock() + handler, inMap := readerRegister[name] + readerRegisterLock.RUnlock() + + if inMap { + rdr = handler() + if rdr != nil { + if cl, ok := rdr.(io.Closer); ok { + defer deferredClose(&err, cl) + } + } else { + err = fmt.Errorf("Reader '%s' is ", name) + } + } else { + err = fmt.Errorf("Reader '%s' is not registered", name) + } + } else { // File + name = strings.Trim(name, `"`) + fileRegisterLock.RLock() + fr := fileRegister[name] + fileRegisterLock.RUnlock() + if mc.cfg.AllowAllFiles || fr { + var file *os.File + var fi os.FileInfo + + if file, err = os.Open(name); err == nil { + defer deferredClose(&err, file) + + // get file size + if fi, err = file.Stat(); err == nil { + rdr = file + if fileSize := int(fi.Size()); fileSize < packetSize { + packetSize = fileSize + } + } + } + } else { + err = fmt.Errorf("local file '%s' is not registered", name) + } + } + + // send content packets + // if packetSize == 0, the Reader contains no data + if err == nil && packetSize > 0 { + data := make([]byte, 4+packetSize) + var n int + for err == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + return ioErr + } + } + } + if err == io.EOF { + err = nil + } + } + + // send empty packet (termination) + if data == nil { + data = make([]byte, 4) + } + if ioErr := mc.writePacket(data[:4]); ioErr != nil { + return ioErr + } + + // read OK packet + if err == nil { + _, err = mc.readResultOK() + return err + } + + mc.readPacket() + return err +} diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 0000000..f63d250 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1309 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
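+
+// Every MySQL packet is framed by a 4 byte header: a 3 byte little-endian
+// payload length followed by a 1 byte sequence number. Payloads of exactly
+// maxPacketSize (2^24-1) bytes are continued in follow-up packets, which is
+// why readPacket and writePacket below loop until a shorter packet is seen.
+// Decoding the header looks roughly like this (hdr is an illustrative name,
+// not an identifier used in this file):
+//
+//  pktLen := int(uint32(hdr[0]) | uint32(hdr[1])<<8 | uint32(hdr[2])<<16)
+//  seq := hdr[3]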
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + var prevData []byte + for { + // read packet header + data, err := mc.buf.readNext(4) + if err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return nil, cerr + } + errLog.Print(err) + mc.Close() + return nil, ErrInvalidConn + } + + // packet length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + // check packet sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, ErrPktSyncMul + } + return nil, ErrPktSync + } + mc.sequence++ + + // packets with length 0 terminate a previous packet which is a + // multiple of (2^24)−1 bytes long + if pktLen == 0 { + // there was no previous packet + if prevData == nil { + errLog.Print(ErrMalformPkt) + mc.Close() + return nil, ErrInvalidConn + } + + return prevData, nil + } + + // read packet body [pktLen bytes] + data, err = mc.buf.readNext(pktLen) + if err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return nil, cerr + } + errLog.Print(err) + mc.Close() + return nil, ErrInvalidConn + } + + // return data if this was the last packet + if pktLen < maxPacketSize { + // zero allocations for non-split packets + if prevData == nil { + return data, nil + } + + return append(prevData, data...), nil + } + + prevData = append(prevData, data...) + } +} + +// Write packet buffer 'data' +func (mc *mysqlConn) writePacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxAllowedPacket { + return ErrPktTooLarge + } + + for { + var size int + if pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + size = maxPacketSize + } else { + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + size = pktLen + } + data[3] = mc.sequence + + // Write packet + if mc.writeTimeout > 0 { + if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { + return err + } + } + + n, err := mc.netConn.Write(data[:4+size]) + if err == nil && n == 4+size { + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] + continue + } + + // Handle error + if err == nil { // n != len(data) + mc.cleanup() + errLog.Print(ErrMalformPkt) + } else { + if cerr := mc.canceled.Value(); cerr != nil { + return cerr + } + if n == 0 && pktLen == len(data)-4 { + // only for the first loop iteration when nothing was written yet + return errBadConnNoWrite + } + mc.cleanup() + errLog.Print(err) + } + return ErrInvalidConn + } +} + +/****************************************************************************** +* Initialisation Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readInitPacket() ([]byte, error) { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + if data[0] == iERR { + return nil, mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, fmt.Errorf( + "unsupported protocol version %d. 
Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + cipher := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, ErrOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + return nil, ErrNoTLS + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [mininum 13 bytes], + // where len=MAX(13, length of auth-plugin-data - 8) + // + // The web documentation is ambiguous about the length. However, + // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, + // the 13th byte is "\0 byte, terminating the second part of + // a scramble". So the second part of the password cipher is + // a NULL terminated string that's at least 13 bytes with the + // last byte being NULL. + // + // The official Python library uses the fixed length 12 + // which seems to work but technically could have a hidden bug. + cipher = append(cipher, data[pos:pos+12]...) + + // TODO: Verify string termination + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + // + //if data[len(data)-1] == 0 { + // return + //} + //return ErrMalformPkt + + // make a memory safe copy of the cipher slice + var b [20]byte + copy(b[:], cipher) + return b[:], nil + } + + // make a memory safe copy of the cipher slice + var b [8]byte + copy(b[:], cipher) + return b[:], nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + clientPluginAuth | + clientMultiResults | + mc.flags&clientLongFlag + + if mc.cfg.ClientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + if mc.cfg.MultiStatements { + clientFlags |= clientMultiStatements + } + + // User Password + scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd)) + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1 + + // To specify a db name + if n := len(mc.cfg.DBName); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length and get buffer with that size + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + var found bool + data[12], found = collations[mc.cfg.Collation] + if !found { + // Note possibility for false negatives: + // could be triggered although the collation is valid if the + // collations map does not contain entries the server supports. + return errors.New("unknown collation") + } + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.netConn = tlsConn + mc.buf.nc = tlsConn + } + + // Filler [23 bytes] (all 0x00) + pos := 13 + for ; pos < 13+23; pos++ { + data[pos] = 0 + } + + // User [null terminated string] + if len(mc.cfg.User) > 0 { + pos += copy(data[pos:], mc.cfg.User) + } + data[pos] = 0x00 + pos++ + + // ScrambleBuffer [length encoded integer] + data[pos] = byte(len(scrambleBuff)) + pos += 1 + copy(data[pos+1:], scrambleBuff) + + // Databasename [null terminated string] + if len(mc.cfg.DBName) > 0 { + pos += copy(data[pos:], mc.cfg.DBName) + data[pos] = 0x00 + pos++ + } + + // Assume native client during response + pos += copy(data[pos:], "mysql_native_password") + data[pos] = 0x00 + + // Send Auth packet + return mc.writePacket(data) +} + +// Client old authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { + // User password + // https://dev.mysql.com/doc/internals/en/old-password-authentication.html + // Old password authentication only need and will need 8-byte challenge. + scrambleBuff := scrambleOldPassword(cipher[:8], []byte(mc.cfg.Passwd)) + + // Calculate the packet length and add a tailing 0 + pktLen := len(scrambleBuff) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add the scrambled password [null terminated string] + copy(data[4:], scrambleBuff) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +// Client clear text authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeClearAuthPacket() error { + // Calculate the packet length and add a tailing 0 + pktLen := len(mc.cfg.Passwd) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add the clear password [null terminated string] + copy(data[4:], mc.cfg.Passwd) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +// Native password authentication method +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error { + // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html + // Native password authentication only need and will need 20-byte challenge. + scrambleBuff := scramblePassword(cipher[0:20], []byte(mc.cfg.Passwd)) + + // Calculate the packet length and add a tailing 0 + pktLen := len(scrambleBuff) + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add the scramble + copy(data[4:], scrambleBuff) + + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data := mc.buf.takeBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1 + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() ([]byte, error) { + data, err := mc.readPacket() + if err == nil { + // packet indicator + switch data[0] { + + case iOK: + return nil, mc.handleOkPacket(data) + + case iEOF: + if len(data) > 1 { + pluginEndIndex := bytes.IndexByte(data, 0x00) + plugin := string(data[1:pluginEndIndex]) + cipher := data[pluginEndIndex+1:] + + switch plugin { + case "mysql_old_password": + // using old_passwords + return cipher, ErrOldPassword + case "mysql_clear_password": + // using clear text password + return cipher, ErrCleartextPassword + case "mysql_native_password": + // using mysql default authentication method + return cipher, ErrNativePassword + default: + return cipher, ErrUnknownPlugin + } + } + + // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest + return nil, ErrOldPassword + + default: // Error otherwise + return nil, mc.handleErrorPacket(data) + } + } + return nil, err +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, ErrMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return ErrMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION + // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover) + if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly { + // Oops; we are connected to a read-only connection, and won't be able + // to issue any write statements. Since RejectReadOnly is configured, + // we throw away this connection hoping this one would have write + // permission. This is specifically for a possible race condition + // during failover (e.g. on AWS Aurora). See README.md for more. + // + // We explicitly close the connection before returning + // driver.ErrBadConn to ensure that `database/sql` purges this + // connection and initiates a new one for next statement next time. 
+ mc.Close() + return driver.ErrBadConn + } + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + + // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +func readStatus(b []byte) statusFlag { + return statusFlag(b[0]) | statusFlag(b[1])<<8 +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + mc.status = readStatus(data[1+n+m : 1+n+m+2]) + if mc.status&statusMoreResultsExists != 0 { + return nil + } + + // warning count [2 bytes] + + return nil +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet + if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEncodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + if mc.cfg.ColumnsWithAlias { + tableName, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + columns[i].tableName = string(tableName) + } else { + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + } + + // Original table [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + + // Filler [uint8] + // Charset [charset, collation uint8] + pos += n + 1 + 2 + + // Length [uint32] + columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4]) + pos += 4 + + // Field type [uint8] + columns[i].fieldType = fieldType(data[pos]) + pos++ + + // Flags [uint16] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + pos += 2 + + // Decimals [uint8] + columns[i].decimals = data[pos] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + if rows.rs.done { + return io.EOF + } + + data, err := mc.readPacket() + if err != nil { + return err + } + + // EOF Packet + if data[0] == iEOF && 
len(data) == 5 { + // server_status [2 bytes] + rows.mc.status = readStatus(data[3:]) + rows.rs.done = true + if !rows.HasNextResultSet() { + rows.mc = nil + } + return io.EOF + } + if data[0] == iERR { + rows.mc = nil + return mc.handleErrorPacket(data) + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.rs.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + string(dest[i].([]byte)), + mc.cfg.Loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + if err != nil { + return err + } + + switch data[0] { + case iERR: + return mc.handleErrorPacket(data) + case iEOF: + if len(data) == 5 { + mc.status = readStatus(data[3:]) + } + return nil + } + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + + return columnCount, nil + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxAllowedPacket - 1 + pktLen := maxLen + + // After the header (bytes 0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Can not use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + stmt.mc.sequence = 0 + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { + if 
len(args) != stmt.paramCount { + return fmt.Errorf( + "argument count mismatch (got: %d; has: %d)", + len(args), + stmt.paramCount, + ) + } + + const minPktLen = 4 + 1 + 4 + 1 + 4 + mc := stmt.mc + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + + if len(args) == 0 { + data = mc.buf.takeBuffer(minPktLen) + } else { + data = mc.buf.takeCompleteBuffer() + } + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return errBadConnNoWrite + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + pos := minPktLen + + var nullMask []byte + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { + // buffer has to be extended but we don't know by how much so + // we depend on append after all data with known sizes fit. + // We stop at that because we deal with a lot of columns here + // which makes the required allocation size hard to guess. + tmp := make([]byte, pos+maskLen+typesLen) + copy(tmp[:pos], data[:pos]) + data = tmp + nullMask = data[pos : pos+maskLen] + pos += maskLen + } else { + nullMask = data[pos : pos+maskLen] + for i := 0; i < maskLen; i++ { + nullMask[i] = 0 + } + pos += maskLen + } + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += len(args) * 2 + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i, arg := range args { + // build NULL-bitmap + if arg == nil { + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = byte(fieldTypeNULL) + paramTypes[i+i+1] = 0x00 + continue + } + + // cache types and values + switch v := arg.(type) { + case int64: + paramTypes[i+i] = byte(fieldTypeLongLong) + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = byte(fieldTypeDouble) + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = byte(fieldTypeTiny) + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) 
+ } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = byte(fieldTypeNULL) + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + var a [64]byte + var b = a[:0] + + if v.IsZero() { + b = append(b, "0000-00-00"...) + } else { + b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat) + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(b)), + ) + paramValues = append(paramValues, b...) + + default: + return fmt.Errorf("can not convert type: %T", arg) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) + mc.buf.buf = data + } + + pos += len(paramValues) + data = data[:pos] + } + + return mc.writePacket(data) +} + +func (mc *mysqlConn) discardResults() error { + for mc.status&statusMoreResultsExists != 0 { + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } + } + return nil +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + rows.mc.status = readStatus(data[3:]) + rows.rs.done = true + if !rows.HasNextResultSet() { + rows.mc = nil + } + return io.EOF + } + mc := rows.mc + rows.mc = nil + + // Error otherwise + return mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.rs.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, fieldTypeLong: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + val := 
binary.LittleEndian.Uint64(data[pos : pos+8]) + if val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 + continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + case + fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD + fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] + fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + switch { + case isNull: + dest[i] = nil + continue + case rows.rs.columns[i].fieldType == fieldTypeTime: + // database/sql does not support an equivalent to TIME, return a string + var dstlen uint8 + switch decimals := rows.rs.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 8 + case 1, 2, 3, 4, 5, 6: + dstlen = 8 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.rs.columns[i].decimals, + ) + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true) + case rows.mc.parseTime: + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc) + default: + var dstlen uint8 + if rows.rs.columns[i].fieldType == fieldTypeDate { + dstlen = 10 + } else { + switch decimals := rows.rs.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 19 + case 1, 2, 3, 4, 5, 6: + dstlen = 19 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.rs.columns[i].decimals, + ) + } + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType) + } + } + + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 0000000..c6438d0 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
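The binary-protocol reader above returns an unsigned BIGINT as an int64 when the value fits and as its decimal string otherwise. The following is a self-contained sketch of that rule only (it uses strconv instead of the driver's internal uint64ToString helper and is not part of the vendored code):

    package main

    import (
        "encoding/binary"
        "fmt"
        "math"
        "strconv"
    )

    // decodeUnsignedLongLong mirrors the unsigned fieldTypeLongLong branch above:
    // 8 little-endian bytes become an int64 when possible, otherwise the decimal
    // string form is returned.
    func decodeUnsignedLongLong(b []byte) interface{} {
        val := binary.LittleEndian.Uint64(b[:8])
        if val > math.MaxInt64 {
            return strconv.FormatUint(val, 10)
        }
        return int64(val)
    }

    func main() {
        small := make([]byte, 8)
        binary.LittleEndian.PutUint64(small, 42)
        big := make([]byte, 8)
        binary.LittleEndian.PutUint64(big, math.MaxUint64)

        fmt.Printf("%T %v\n", decodeUnsignedLongLong(small), decodeUnsignedLongLong(small)) // int64 42
        fmt.Printf("%T %v\n", decodeUnsignedLongLong(big), decodeUnsignedLongLong(big))     // string 18446744073709551615
    }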
+ +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 0000000..18f4169 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,219 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" + "math" + "reflect" +) + +type resultSet struct { + columns []mysqlField + columnNames []string + done bool +} + +type mysqlRows struct { + mc *mysqlConn + rs resultSet + finish func() +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +func (rows *mysqlRows) Columns() []string { + if rows.rs.columnNames != nil { + return rows.rs.columnNames + } + + columns := make([]string, len(rows.rs.columns)) + if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias { + for i := range columns { + if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 { + columns[i] = tableName + "." + rows.rs.columns[i].name + } else { + columns[i] = rows.rs.columns[i].name + } + } + } else { + for i := range columns { + columns[i] = rows.rs.columns[i].name + } + } + + rows.rs.columnNames = columns + return columns +} + +func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string { + if name, ok := typeDatabaseName[rows.rs.columns[i].fieldType]; ok { + return name + } + return "" +} + +// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) { +// return int64(rows.rs.columns[i].length), true +// } + +func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) { + return rows.rs.columns[i].flags&flagNotNULL == 0, true +} + +func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) { + column := rows.rs.columns[i] + decimals := int64(column.decimals) + + switch column.fieldType { + case fieldTypeDecimal, fieldTypeNewDecimal: + if decimals > 0 { + return int64(column.length) - 2, decimals, true + } + return int64(column.length) - 1, decimals, true + case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime: + return decimals, decimals, true + case fieldTypeFloat, fieldTypeDouble: + if decimals == 0x1f { + return math.MaxInt64, math.MaxInt64, true + } + return math.MaxInt64, decimals, true + } + + return 0, 0, false +} + +func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type { + return rows.rs.columns[i].scanType() +} + +func (rows *mysqlRows) Close() (err error) { + if f := rows.finish; f != nil { + f() + rows.finish = nil + } + + mc := rows.mc + if mc == nil { + return nil + } + if err := mc.error(); err != nil { + return err + } + + // Remove unread packets from stream + if !rows.rs.done { + err = mc.readUntilEOF() + } + if err == nil { + if err = mc.discardResults(); err != nil { + return err + } + } + + rows.mc = nil + return err +} + +func (rows *mysqlRows) HasNextResultSet() (b bool) { + if rows.mc == nil { + return false + } + return rows.mc.status&statusMoreResultsExists != 0 +} + +func (rows *mysqlRows) nextResultSet() 
(int, error) { + if rows.mc == nil { + return 0, io.EOF + } + if err := rows.mc.error(); err != nil { + return 0, err + } + + // Remove unread packets from stream + if !rows.rs.done { + if err := rows.mc.readUntilEOF(); err != nil { + return 0, err + } + rows.rs.done = true + } + + if !rows.HasNextResultSet() { + rows.mc = nil + return 0, io.EOF + } + rows.rs = resultSet{} + return rows.mc.readResultSetHeaderPacket() +} + +func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) { + for { + resLen, err := rows.nextResultSet() + if err != nil { + return 0, err + } + + if resLen > 0 { + return resLen, nil + } + + rows.rs.done = true + } +} + +func (rows *binaryRows) NextResultSet() error { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + return err + } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if err := mc.error(); err != nil { + return err + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) NextResultSet() (err error) { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + return err + } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if err := mc.error(); err != nil { + return err + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 0000000..98e57bc --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,178 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "io" + "reflect" + "strconv" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.closed.IsSet() { + // driver.Stmt.Close can be called more than once, thus this function + // has to be idempotent. + // See also Issue #450 and golang/go#16019. 
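The nextResultSet/NextResultSet plumbing above is what database/sql drives when an application walks several result sets returned by one round trip. A hedged usage sketch follows; the DSN, the multiStatements option and the tables a and b are assumptions, not taken from this diff:

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        _ "github.com/go-sql-driver/mysql"
    )

    func main() {
        db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test?multiStatements=true")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        rows, err := db.Query("SELECT id FROM a; SELECT id FROM b")
        if err != nil {
            log.Fatal(err)
        }
        defer rows.Close()

        for set := 0; ; set++ {
            for rows.Next() {
                var id int64
                if err := rows.Scan(&id); err != nil {
                    log.Fatal(err)
                }
                fmt.Println("set", set, "id", id)
            }
            // Advances to the next result set; backed by mysqlRows.nextResultSet above.
            if !rows.NextResultSet() {
                break
            }
        }
        if err := rows.Err(); err != nil {
            log.Fatal(err)
        }
    }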
+ //errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) + stmt.mc = nil + return err +} + +func (stmt *mysqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { + return converter{} +} + +func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, stmt.mc.markBadConn(err) + } + + mc := stmt.mc + + mc.affectedRows = 0 + mc.insertId = 0 + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + if resLen > 0 { + // Columns + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + + // Rows + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if err := mc.discardResults(); err != nil { + return nil, err + } + + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, nil +} + +func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { + return stmt.query(args) +} + +func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) { + if stmt.mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, stmt.mc.markBadConn(err) + } + + mc := stmt.mc + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + rows := new(binaryRows) + + if resLen > 0 { + rows.mc = mc + rows.rs.columns, err = mc.readColumns(resLen) + } else { + rows.rs.done = true + + switch err := rows.NextResultSet(); err { + case nil, io.EOF: + return rows, nil + default: + return nil, err + } + } + + return rows, err +} + +type converter struct{} + +func (c converter) ConvertValue(v interface{}) (driver.Value, error) { + if driver.IsValue(v) { + return v, nil + } + + if v != nil { + if valuer, ok := v.(driver.Valuer); ok { + return valuer.Value() + } + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } + return c.ConvertValue(rv.Elem().Interface()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(rv.Uint()), nil + case reflect.Uint64: + u64 := rv.Uint() + if u64 >= 1<<63 { + return strconv.FormatUint(u64, 10), nil + } + return int64(u64), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + case reflect.Bool: + return rv.Bool(), nil + case reflect.Slice: + ek := rv.Type().Elem().Kind() + if ek == reflect.Uint8 { + return rv.Bytes(), nil + } + return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek) + case reflect.String: + return rv.String(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 0000000..417d727 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
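writeExecutePacket and converter.ConvertValue together define which Go argument types a prepared statement accepts: signed integers as int64, uint64 (falling back to a string once it overflows int64), float64, bool, []byte, string and time.Time. A hedged sketch of exercising that path through database/sql; the DSN and the events table are hypothetical:

    package main

    import (
        "database/sql"
        "log"
        "time"

        _ "github.com/go-sql-driver/mysql"
    )

    func main() {
        db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test?parseTime=true")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        stmt, err := db.Prepare("INSERT INTO events (name, ok, score, raw, at) VALUES (?, ?, ?, ?, ?)")
        if err != nil {
            log.Fatal(err)
        }
        defer stmt.Close()

        // string, bool, float64, []byte and time.Time all map onto the parameter
        // types handled by writeExecutePacket/ConvertValue above.
        if _, err := stmt.Exec("ping", true, 0.75, []byte{0x01, 0x02}, time.Now()); err != nil {
            log.Fatal(err)
        }
    }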
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.closed.IsSet() { + return ErrInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.closed.IsSet() { + return ErrInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 0000000..a92a402 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,822 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/sha1" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "fmt" + "io" + "strings" + "sync" + "sync/atomic" + "time" +) + +var ( + tlsConfigLock sync.RWMutex + tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs +) + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. +// +// Note: The tls.Config provided to needs to be exclusively owned by the driver after registering. +// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { + return fmt.Errorf("key '%s' is reserved", key) + } + + tlsConfigLock.Lock() + if tlsConfigRegister == nil { + tlsConfigRegister = make(map[string]*tls.Config) + } + + tlsConfigRegister[key] = config + tlsConfigLock.Unlock() + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. +func DeregisterTLSConfig(key string) { + tlsConfigLock.Lock() + if tlsConfigRegister != nil { + delete(tlsConfigRegister, key) + } + tlsConfigLock.Unlock() +} + +func getTLSConfigClone(key string) (config *tls.Config) { + tlsConfigLock.RLock() + if v, ok := tlsConfigRegister[key]; ok { + config = cloneTLSConfig(v) + } + tlsConfigLock.RUnlock() + return +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Authentication * +******************************************************************************/ + +// Encrypt password using 4.1+ method +func scramblePassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write(password) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Encrypt password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + var tmp uint32 + + result[0] = 1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Encrypt password using insecure pre 4.1 method +func scrambleOldPassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + scramble = scramble[:8] + + hashPw := pwHash(password) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 + } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... 
+// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. +func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + base := "0000-00-00 00:00:00.0000000" + switch len(str) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if str == base[:len(str)] { + return + } + t, err = time.Parse(timeFormat[:len(str)], str) + default: + err = fmt.Errorf("invalid time string: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("invalid DATETIME packet length %d", num) +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. 
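parseDateTime above treats the server's value as a bare wall clock: it parses the string and then rebuilds the same year/month/day and hour/minute/second in the configured location instead of converting the instant from UTC. A minimal standalone sketch of that fix-up (simplified layout, not the driver's internal timeFormat):

    package main

    import (
        "fmt"
        "time"
    )

    func parseInLoc(str string, loc *time.Location) (time.Time, error) {
        const layout = "2006-01-02 15:04:05"
        t, err := time.Parse(layout, str)
        if err != nil {
            return time.Time{}, err
        }
        y, mo, d := t.Date()
        h, mi, s := t.Clock()
        // Reinterpret the wall-clock values in loc rather than shifting the instant.
        return time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
    }

    func main() {
        loc, _ := time.LoadLocation("America/New_York")
        t, err := parseInLoc("2018-09-11 18:05:04", loc)
        fmt.Println(t, err) // 2018-09-11 18:05:04 -0400 EDT <nil>
    }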
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + if justTime { + return zeroDateTime[11 : 11+length], nil + } + return zeroDateTime[:length], nil + } + var dst []byte // return value + var pt, p1, p2, p3 byte // current digit pair + var zOffs byte // offset of value in zeroDateTime + if justTime { + switch length { + case + 8, // time (can be up to 10 when negative and 100+ hours) + 10, 11, 12, 13, 14, 15: // time with fractional seconds + default: + return nil, fmt.Errorf("illegal TIME length %d", length) + } + switch len(src) { + case 8, 12: + default: + return nil, fmt.Errorf("invalid TIME packet length %d", len(src)) + } + // +2 to enable negative time and 100+ hours + dst = make([]byte, 0, length+2) + if src[0] == 1 { + dst = append(dst, '-') + } + if src[1] != 0 { + hour := uint16(src[1])*24 + uint16(src[5]) + pt = byte(hour / 100) + p1 = byte(hour - 100*uint16(pt)) + dst = append(dst, digits01[pt]) + } else { + p1 = src[5] + } + zOffs = 11 + src = src[6:] + } else { + switch length { + case 10, 19, 21, 22, 23, 24, 25, 26: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s length %d", t, length) + } + switch len(src) { + case 4, 7, 11: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s packet length %d", t, len(src)) + } + dst = make([]byte, 0, length) + // start with the date + year := binary.LittleEndian.Uint16(src[:2]) + pt = byte(year / 100) + p1 = byte(year - 100*uint16(pt)) + p2, p3 = src[2], src[3] + dst = append(dst, + digits10[pt], digits01[pt], + digits10[p1], digits01[p1], '-', + digits10[p2], digits01[p2], '-', + digits10[p3], digits01[p3], + ) + if length == 10 { + return dst, nil + } + if len(src) == 4 { + return append(dst, zeroDateTime[10:length]...), nil + } + dst = append(dst, ' ') + p1 = src[4] // hour + src = src[5:] + } + // p1 is 2-digit hour, src is after hour + p2, p3 = src[0], src[1] + dst = append(dst, + digits10[p1], digits01[p1], ':', + digits10[p2], digits01[p2], ':', + digits10[p3], digits01[p3], + ) + if length <= byte(len(dst)) { + return dst, nil + } + src = src[2:] + if len(src) == 0 { + return append(dst, zeroDateTime[19:zOffs+length]...), nil + } + microsecs := binary.LittleEndian.Uint32(src[:4]) + p1 = byte(microsecs / 10000) + microsecs -= 10000 * uint32(p1) + p2 = byte(microsecs / 100) + microsecs -= 100 * uint32(p2) + p3 = byte(microsecs) + switch decimals := zOffs + length - 20; decimals { + default: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], digits01[p3], + ), nil + case 1: + return append(dst, '.', + digits10[p1], + ), nil + case 2: + return append(dst, '.', + digits10[p1], digits01[p1], + ), nil + case 3: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], + ), nil + case 4: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + ), nil + case 5: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], 
+ ), nil + } +} + +/****************************************************************************** +* Convert from and to bytes * +******************************************************************************/ + +func uint64ToBytes(n uint64) []byte { + return []byte{ + byte(n), + byte(n >> 8), + byte(n >> 16), + byte(n >> 24), + byte(n >> 32), + byte(n >> 40), + byte(n >> 48), + byte(n >> 56), + } +} + +func uint64ToString(n uint64) []byte { + var a [20]byte + i := 20 + + // U+0030 = 0 + // ... + // U+0039 = 9 + + var q uint64 + for n >= 10 { + i-- + q = n / 10 + a[i] = uint8(n-q*10) + 0x30 + n = q + } + + i-- + a[i] = uint8(n) + 0x30 + + return a[i:] +} + +// treats string value as unsigned integer representation +func stringToInt(b []byte) int { + val := 0 + for i := range b { + val *= 10 + val += int(b[i] - 0x30) + } + return val +} + +// returns the string read as a bytes slice, wheter the value is NULL, +// the number of bytes read and an error, in case the string is longer than +// the input slice +func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { + // Get length + num, isNull, n := readLengthEncodedInteger(b) + if num < 1 { + return b[n:n], isNull, n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return b[n-int(num) : n], false, n, nil + } + return nil, false, n, io.EOF +} + +// returns the number of bytes skipped and an error, in case the string is +// longer than the input slice +func skipLengthEncodedString(b []byte) (int, error) { + // Get length + num, _, n := readLengthEncodedInteger(b) + if num < 1 { + return n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return n, nil + } + return n, io.EOF +} + +// returns the number read, whether the value is NULL and the number of bytes read +func readLengthEncodedInteger(b []byte) (uint64, bool, int) { + // See issue #349 + if len(b) == 0 { + return 0, true, 1 + } + + switch b[0] { + // 251: NULL + case 0xfb: + return 0, true, 1 + + // 252: value of following 2 + case 0xfc: + return uint64(b[1]) | uint64(b[2])<<8, false, 3 + + // 253: value of following 3 + case 0xfd: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + + // 254: value of following 8 + case 0xfe: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | + uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | + uint64(b[7])<<48 | uint64(b[8])<<56, + false, 9 + } + + // 0-250: value of first byte + return uint64(b[0]), false, 1 +} + +// encodes a uint64 value and appends it to the given bytes slice +func appendLengthEncodedInteger(b []byte, n uint64) []byte { + switch { + case n <= 250: + return append(b, byte(n)) + + case n <= 0xffff: + return append(b, 0xfc, byte(n), byte(n>>8)) + + case n <= 0xffffff: + return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) + } + return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), + byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) +} + +// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. +// If cap(buf) is not enough, reallocate new buffer. 
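readLengthEncodedInteger and appendLengthEncodedInteger above implement MySQL's length-encoded integer: values 0-250 fit in one byte, the prefixes 0xfc/0xfd/0xfe introduce 2-, 3- and 8-byte little-endian values, and 0xfb marks NULL. A self-contained round-trip sketch of the same wire format (not the vendored code itself):

    package main

    import "fmt"

    func appendLenEncInt(b []byte, n uint64) []byte {
        switch {
        case n <= 250:
            return append(b, byte(n))
        case n <= 0xffff:
            return append(b, 0xfc, byte(n), byte(n>>8))
        case n <= 0xffffff:
            return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
        default:
            return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
                byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
        }
    }

    func readLenEncInt(b []byte) (value uint64, isNull bool, read int) {
        switch b[0] {
        case 0xfb: // NULL
            return 0, true, 1
        case 0xfc: // 2-byte value
            return uint64(b[1]) | uint64(b[2])<<8, false, 3
        case 0xfd: // 3-byte value
            return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
        case 0xfe: // 8-byte value
            var v uint64
            for i := 0; i < 8; i++ {
                v |= uint64(b[1+i]) << (8 * uint(i))
            }
            return v, false, 9
        default: // 0-250: the byte itself
            return uint64(b[0]), false, 1
        }
    }

    func main() {
        for _, n := range []uint64{7, 300, 70000, 5000000000} {
            buf := appendLenEncInt(nil, n)
            v, _, read := readLenEncInt(buf)
            fmt.Printf("n=%d encoded=%d bytes decoded=%d\n", n, read, v)
        }
    }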
+func reserveBuffer(buf []byte, appendSize int) []byte { + newSize := len(buf) + appendSize + if cap(buf) < newSize { + // Grow buffer exponentially + newBuf := make([]byte, len(buf)*2+appendSize) + copy(newBuf, buf) + buf = newBuf + } + return buf[:newSize] +} + +// escapeBytesBackslash escapes []byte with backslashes (\) +// This escapes the contents of a string (provided as []byte) by adding backslashes before special +// characters, and turning others into specific escape sequences, such as +// turning newlines into \n and null bytes into \0. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. +func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. +func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +/****************************************************************************** +* Sync utils * +******************************************************************************/ + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://github.com/golang/go/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. 
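The four escape helpers above encode two server modes: backslash escaping by default, and apostrophe doubling when NO_BACKSLASH_ESCAPES is in effect. A deliberately simplified sketch of the difference (only ' and \ are handled here; it is not the driver's full escape table):

    package main

    import (
        "fmt"
        "strings"
    )

    // escapeBackslash roughly follows escapeStringBackslash above for the two
    // characters shown; escapeQuotes follows escapeStringQuotes.
    func escapeBackslash(s string) string {
        return strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(s)
    }

    func escapeQuotes(s string) string {
        return strings.Replace(s, `'`, `''`, -1)
    }

    func main() {
        v := `O'Brien \ Co`
        fmt.Println("'" + escapeBackslash(v) + "'") // 'O\'Brien \\ Co'
        fmt.Println("'" + escapeQuotes(v) + "'")    // 'O''Brien \ Co'
    }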
+func (*noCopy) Lock() {} + +// atomicBool is a wrapper around uint32 for usage as a boolean value with +// atomic access. +type atomicBool struct { + _noCopy noCopy + value uint32 +} + +// IsSet returns wether the current boolean value is true +func (ab *atomicBool) IsSet() bool { + return atomic.LoadUint32(&ab.value) > 0 +} + +// Set sets the value of the bool regardless of the previous value +func (ab *atomicBool) Set(value bool) { + if value { + atomic.StoreUint32(&ab.value, 1) + } else { + atomic.StoreUint32(&ab.value, 0) + } +} + +// TrySet sets the value of the bool and returns wether the value changed +func (ab *atomicBool) TrySet(value bool) bool { + if value { + return atomic.SwapUint32(&ab.value, 1) == 0 + } + return atomic.SwapUint32(&ab.value, 0) > 0 +} + +// atomicBool is a wrapper for atomically accessed error values +type atomicError struct { + _noCopy noCopy + value atomic.Value +} + +// Set sets the error value regardless of the previous value. +// The value must not be nil +func (ae *atomicError) Set(value error) { + ae.value.Store(value) +} + +// Value returns the current error value +func (ae *atomicError) Value() error { + if v := ae.value.Load(); v != nil { + // this will panic if the value doesn't implement the error interface + return v.(error) + } + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go17.go b/vendor/github.com/go-sql-driver/mysql/utils_go17.go new file mode 100644 index 0000000..f595634 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils_go17.go @@ -0,0 +1,40 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build go1.7 +// +build !go1.8 + +package mysql + +import "crypto/tls" + +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go18.go b/vendor/github.com/go-sql-driver/mysql/utils_go18.go new file mode 100644 index 0000000..7d8c9b1 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils_go18.go @@ -0,0 +1,49 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
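atomicBool above wraps a uint32 so connection state can be flipped and observed without a mutex, with TrySet reporting whether the swap actually changed the value. A minimal standalone sketch of the same pattern (none of these names are exported by the driver):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type flag struct{ v uint32 }

    func (f *flag) IsSet() bool { return atomic.LoadUint32(&f.v) > 0 }

    func (f *flag) Set(value bool) {
        if value {
            atomic.StoreUint32(&f.v, 1)
            return
        }
        atomic.StoreUint32(&f.v, 0)
    }

    // TrySet returns true only if the flag flipped from the opposite state.
    func (f *flag) TrySet(value bool) bool {
        if value {
            return atomic.SwapUint32(&f.v, 1) == 0
        }
        return atomic.SwapUint32(&f.v, 0) > 0
    }

    func main() {
        var closed flag
        fmt.Println(closed.TrySet(true)) // true: flipped 0 -> 1
        fmt.Println(closed.TrySet(true)) // false: already set
        fmt.Println(closed.IsSet())      // true
    }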
+ +// +build go1.8 + +package mysql + +import ( + "crypto/tls" + "database/sql" + "database/sql/driver" + "errors" +) + +func cloneTLSConfig(c *tls.Config) *tls.Config { + return c.Clone() +} + +func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) { + dargs := make([]driver.Value, len(named)) + for n, param := range named { + if len(param.Name) > 0 { + // TODO: support the use of Named Parameters #561 + return nil, errors.New("mysql: driver does not support the use of Named Parameters") + } + dargs[n] = param.Value + } + return dargs, nil +} + +func mapIsolationLevel(level driver.IsolationLevel) (string, error) { + switch sql.IsolationLevel(level) { + case sql.LevelRepeatableRead: + return "REPEATABLE READ", nil + case sql.LevelReadCommitted: + return "READ COMMITTED", nil + case sql.LevelReadUncommitted: + return "READ UNCOMMITTED", nil + case sql.LevelSerializable: + return "SERIALIZABLE", nil + default: + return "", errors.New("mysql: unsupported isolation level: " + string(level)) + } +} diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 0000000..1931f40 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 0000000..9171c97 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 0000000..20e391f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,64 @@ +# Gorilla WebSocket + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. 
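A minimal client sketch of typical use of the package follows, added here as an illustration only; the ws://localhost:8080/echo endpoint is an assumption, and the package's own runnable examples live in the directories linked below.

    package main

    import (
        "log"

        "github.com/gorilla/websocket"
    )

    func main() {
        conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
        if err != nil {
            log.Fatal("dial:", err)
        }
        defer conn.Close()

        // Send one text message and read the echoed reply.
        if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
            log.Fatal("write:", err)
        }
        _, msg, err := conn.ReadMessage()
        if err != nil {
            log.Fatal("read:", err)
        }
        log.Printf("recv: %s", msg)
    }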
+ +[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) + +### Documentation + +* [API Reference](http://godoc.org/github.com/gorilla/websocket) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + +### Gorilla WebSocket compared with other packages + + + + + + + + + + + + + + + + + + +
| | github.com/gorilla | golang.org/x/net |
| --- | --- | --- |
| RFC 6455 Features | | |
| Passes Autobahn Test Suite | Yes | No |
| Receive fragmented message | Yes | No, see note 1 |
| Send close message | Yes | No |
| Send pings and receive pongs | Yes | No |
| Get the type of a received data message | Yes | Yes, see note 2 |
| Other Features | | |
| Compression Extensions | Experimental | No |
| Read message using io.Reader | Yes | No, see note 3 |
| Write message using io.WriteCloser | Yes | No, see note 3 |
+ +Notes: + +1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). +2. The application can get the type of a received data message by implementing + a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) + function. +3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. + Read returns when the input buffer is full or a frame boundary is + encountered. Each call to Write sends a single frame message. The Gorilla + io.Reader and io.WriteCloser operate on a single WebSocket message. + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 0000000..41f8ed5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,330 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the client's requested subprotocols. 
+ Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer Dialer = *DefaultDialer + +// Dial creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. 
+ req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + var deadline time.Time + if d.HandshakeTimeout != 0 { + deadline = time.Now().Add(d.HandshakeTimeout) + } + + // Get network dial function. + netDial := d.NetDial + if netDial == nil { + netDialer := &net.Dialer{Deadline: deadline} + netDial = netDialer.Dial + } + + // If needed, wrap the dial function to set the connection deadline. + if !deadline.Equal(time.Time{}) { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. + if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" { + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + if err := tlsConn.Handshake(); err != nil { + return nil, nil, err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return nil, nil, err + } + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. 
+ buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go new file mode 100644 index 0000000..4f0d943 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go new file mode 100644 index 0000000..babb007 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 0000000..813ffb1 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
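For orientation, here is a minimal client built on the Dialer above. It is an illustrative sketch, not part of this commit; the ws://localhost:8080/ws endpoint and the Origin value are assumptions invented for the example.

package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

func main() {
	// Extra handshake headers (Origin, cookies, authorization) are passed via
	// the request header argument to Dial.
	header := http.Header{}
	header.Set("Origin", "http://localhost:8080")

	conn, resp, err := websocket.DefaultDialer.Dial("ws://localhost:8080/ws", header)
	if err != nil {
		// On ErrBadHandshake, resp is non-nil and carries the HTTP status and
		// a slurped portion of the body for debugging.
		if resp != nil {
			log.Printf("handshake failed with status %d", resp.StatusCode)
		}
		log.Fatal("dial: ", err)
	}
	defer conn.Close()

	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal("write: ", err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatal("read: ", err)
	}
	log.Printf("received: %s", msg)
}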
+ +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. + if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 0000000..5f46bf4 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1157 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
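As a side note on the compression helpers in compression.go above, the following standalone sketch (illustrative, not part of this commit) round-trips a payload through raw DEFLATE the way permessage-deflate frames it: the 0x00 0x00 0xff 0xff tail is stripped after compressing and restored, together with a final empty block, before decompressing, mirroring compressNoContextTakeover and decompressNoContextTakeover.

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	// Compress with a sync flush; the stream then ends with 0x00 0x00 0xff 0xff,
	// which RFC 7692 says to strip before putting the payload on the wire.
	var buf bytes.Buffer
	fw, _ := flate.NewWriter(&buf, 1)
	fw.Write([]byte("hello websocket"))
	fw.Flush()
	payload := buf.Bytes()
	payload = payload[:len(payload)-4]

	// Decompress by re-adding the tail plus a final empty block, so the flate
	// reader sees a properly terminated stream instead of an unexpected EOF.
	tail := "\x00\x00\xff\xff" + "\x01\x00\x00\xff\xff"
	fr := flate.NewReader(io.MultiReader(bytes.NewReader(payload), strings.NewReader(tail)))
	out, _ := ioutil.ReadAll(fr)
	fmt.Printf("%s\n", out) // hello websocket
}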
+ +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) 
+ case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan bool // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + readRemaining int64 // bytes remaining in current frame. + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. 
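+	// Masking state for the frame currently being read; client-to-server
+	// frames arrive masked (RFC 6455, section 5.3) and are unmasked as they
+	// are read.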
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { + return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil) +} + +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn { + mu := make(chan bool, 1) + mu <- true + + var br *bufio.Reader + if readBufferSize == 0 && brw != nil && brw.Reader != nil { + // Reuse the supplied bufio.Reader if the buffer has a useful size. + // This code assumes that peek on a reader returns + // bufio.Reader.buf[:0]. + brw.Reader.Reset(conn) + if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 { + br = brw.Reader + } + } + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } + if readBufferSize < maxControlFramePayloadSize { + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + var writeBuf []byte + if writeBufferSize == 0 && brw != nil && brw.Writer != nil { + // Use the bufio.Writer's buffer if the buffer has a useful size. This + // code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + brw.Writer.Reset(&wh) + brw.Writer.WriteByte(0) + brw.Flush() + if cap(wh.p) >= maxFrameHeaderSize+256 { + writeBuf = wh.p[:cap(wh.p)] + } + } + + if writeBuf == nil { + if writeBufferSize == 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize) + } + + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := time.Hour * 1000 + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +func (c *Conn) prepWrite(messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + return err +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. 
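+//
+// A sketch of typical use (illustrative; conn and src are assumed to exist):
+//
+//	w, err := conn.NextWriter(websocket.BinaryMessage)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(w, src); err != nil {
+//		return err
+//	}
+//	return w.Close()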
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if err := c.prepWrite(messageType); err != nil { + return nil, err + } + + mw := &messageWriter{ + c: c, + frameType: messageType, + pos: maxFrameHeaderSize, + } + c.writer = mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) fatal(err error) error { + if w.err != nil { + w.err = err + w.c.writer = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.fatal(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.fatal(err) + } + + if final { + c.writer = nil + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
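+		// Only the server side can take this shortcut: client frames must be
+		// masked in place in writeBuf, and flushFrame rejects the extra
+		// argument in client mode for that reason.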
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + if err := w.flushFrame(true, nil); err != nil { + return err + } + w.err = errWriteClosed + return nil +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + if err := c.prepWrite(messageType); err != nil { + return err + } + mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
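+	// Byte 0 carries the FIN flag, RSV1-3 and the 4-bit opcode; byte 1 carries
+	// the MASK flag and a 7-bit payload length, where 126 and 127 announce the
+	// 16- and 64-bit extended lengths parsed in step 3 below.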
+ + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.readRemaining = int64(p[1] & 0x7f) + + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } + + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(p)) + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(p)) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.readRemaining = 0 + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. 
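+	// Control frames are handled inline on the read path, which is why an
+	// application must keep reading the connection for its ping, pong and
+	// close handlers to run.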
+ + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("invalid close code") + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + c.readRemaining -= int64(n) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. 
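+//
+// A sketch of a handler that records the peer's close code before sending the
+// default reply (illustrative; conn is assumed to exist):
+//
+//	conn.SetCloseHandler(func(code int, text string) error {
+//		log.Printf("peer closed: %d %q", code, text)
+//		message := websocket.FormatCloseMessage(code, "")
+//		conn.WriteControl(websocket.CloseMessage, message, time.Now().Add(time.Second))
+//		return nil
+//	})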
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. 
+ return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/gorilla/websocket/conn_read.go new file mode 100644 index 0000000..1ea1505 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_read.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} diff --git a/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/gorilla/websocket/conn_read_legacy.go new file mode 100644 index 0000000..018541c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_read_legacy.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + if len(p) > 0 { + // advance over the bytes just read + io.ReadFull(c.br, p) + } + return p, err +} diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go new file mode 100644 index 0000000..a509a21 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package websocket + +import "net" + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go new file mode 100644 index 0000000..37edaff --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write_legacy.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +func (c *Conn) writeBufs(bufs ...[]byte) error { + for _, buf := range bufs { + if len(buf) > 0 { + if _, err := c.conn.Write(buf); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 0000000..dcce1a6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,180 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. 
A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. 
The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 0000000..dc2c1f6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 0000000..577fce9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 0000000..2aac060 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. 
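A short usage sketch for the WriteJSON/ReadJSON helpers in json.go above (illustrative only; the Message type and echoJSON function are invented for the example):

package example

import "github.com/gorilla/websocket"

// Message is a made-up payload type for the sketch.
type Message struct {
	Kind string `json:"kind"`
	Body string `json:"body"`
}

// echoJSON reads one JSON-encoded message from the peer and writes it back
// with Kind rewritten. An empty message surfaces as io.ErrUnexpectedEOF,
// since exactly one JSON value is expected per message.
func echoJSON(conn *websocket.Conn) error {
	var m Message
	if err := conn.ReadJSON(&m); err != nil {
		return err
	}
	m.Kind = "echo"
	return conn.WriteJSON(&m)
}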
+ +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 0000000..1efffbd --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,103 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + err error + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
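+		// The throwaway Conn below writes into prepareConn's in-memory buffer,
+		// so the exact wire bytes for this (isServer, compress,
+		// compressionLevel) combination are computed once and then reused by
+		// every WritePreparedMessage call.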
+ mu := make(chan bool, 1) + mu <- true + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 0000000..bf2478e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + fowardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.fowardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. + br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 0000000..aee2705 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,298 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. 
+type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is set, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. +func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-WebSocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
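The Upgrade method documented above (and defined just below) is the usual server-side entry point. A hedged sketch of wiring an Upgrader into a handler; the echo endpoint and buffer sizes are assumptions for illustration:

```go
package example

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
	// CheckOrigin is left nil, so the same-origin default described above applies.
}

// echo upgrades the request and reflects every message back to the client.
func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade has already written an HTTP error response to the client.
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	for {
		mt, msg, err := conn.ReadMessage()
		if err != nil {
			return
		}
		if err := conn.WriteMessage(mt, msg); err != nil {
			return
		}
	}
}
```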
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + var ( + netConn net.Conn + err error + ) + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err = h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + p := c.writeBuf[:0] + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. 
+ netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 0000000..385fa01 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,237 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
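Subprotocols and IsWebSocketUpgrade, defined at the end of server.go above, cover the pieces the deprecated Upgrade function leaves to the application. A sketch of subprotocol negotiation; the protocol names are placeholders, not values used by this project:

```go
package example

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	// The first server-preferred protocol that the client also offered wins.
	Subprotocols: []string{"v2.example", "v1.example"},
}

func handle(w http.ResponseWriter, r *http.Request) {
	if !websocket.IsWebSocketUpgrade(r) {
		http.Error(w, "websocket handshake required", http.StatusBadRequest)
		return
	}
	log.Println("client offered:", websocket.Subprotocols(r))
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	defer conn.Close()
	log.Println("negotiated:", conn.Subprotocol())
}
```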
+ +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Octet types from RFC 2616. +var octetTypes [256]byte + +const ( + isTokenOctet = 1 << iota + isSpaceOctet +) + +func init() { + // From RFC 2616 + // + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t byte + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpaceOctet + } + if isChar && !isCtl && !isSeparator { + t |= isTokenOctet + } + octetTypes[c] = t + } +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpaceOctet == 0 { + break + } + } + return s[i:] +} + +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isTokenOctet == 0 { + break + } + } + return s[:i], s[i:] +} + +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensiosn parses WebSocket extensions from a header. 
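The token, quoted-string, and case-folding helpers above implement just enough of RFC 2616 header parsing for the handshake. A small in-package sketch, written as a test because these identifiers are unexported, of what the extension parser defined next produces; the header value is an assumed example:

```go
package websocket

import (
	"net/http"
	"reflect"
	"testing"
)

func TestParseExtensionsSketch(t *testing.T) {
	h := http.Header{}
	h.Add("Sec-Websocket-Extensions", "permessage-deflate; client_no_context_takeover, x-custom; p=1")
	// Each extension becomes a map whose "" key holds the extension name and
	// whose other keys hold its parameters.
	got := parseExtensions(h)
	want := []map[string]string{
		{"": "permessage-deflate", "client_no_context_takeover": ""},
		{"": "x-custom", "p": "1"},
	}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("parseExtensions = %v, want %v", got, want)
	}
}
```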
+func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 0000000..2e668f6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
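The bundled x/net/proxy code opens with PerHost, a dialer that sends most connections through a default dialer but bypasses it for configured hosts, zones, and networks. A sketch using the un-bundled golang.org/x/net/proxy names (the bundled copy simply prefixes them with proxy_ so they live inside package websocket); the proxy address and bypass list are assumptions:

```go
package example

import (
	"log"

	"golang.org/x/net/proxy"
)

func dialViaPerHost() {
	// Route ordinary traffic through a SOCKS5 proxy, but connect directly to
	// localhost, the 10.0.0.0/8 range, and anything under *.internal.test.
	socks, err := proxy.SOCKS5("tcp", "proxy.example.com:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	perHost := proxy.NewPerHost(socks, proxy.Direct)
	perHost.AddFromString("localhost,10.0.0.0/8,*.internal.test")

	conn, err := perHost.Dial("tcp", "db.internal.test:5432")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```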
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
+func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
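In the websocket client this bundle is reached through the Dialer's Proxy callback: an http:// proxy URL selects the CONNECT-based httpProxyDialer from proxy.go, while a socks5:// URL selects the SOCKS5 dialer constructed just below. A hedged sketch, assuming the upstream gorilla/websocket Dialer API and a placeholder endpoint:

```go
package example

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

func dialThroughProxy() {
	// ProxyFromEnvironment honours HTTP_PROXY, HTTPS_PROXY, and NO_PROXY.
	d := websocket.Dialer{Proxy: http.ProxyFromEnvironment}
	conn, _, err := d.Dial("wss://example.com/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```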
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 0000000..f9c841a --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 0000000..0018dc7 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 0000000..2a72757 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,171 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. 
This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
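The hook helpers above are designed to be composed and handed to a DecoderConfig. A minimal sketch; the Settings struct, its fields, and the input keys are assumptions for illustration:

```go
package example

import (
	"time"

	"github.com/mitchellh/mapstructure"
)

// Settings is a hypothetical target struct for this sketch.
type Settings struct {
	Timeout time.Duration
	Tags    []string
}

func decodeSettings(input map[string]interface{}) (*Settings, error) {
	var out Settings
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result: &out,
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(), // "30s" -> 30 * time.Second
			mapstructure.StringToSliceHookFunc(","),     // "a,b" -> []string{"a", "b"}
		),
	})
	if err != nil {
		return nil, err
	}
	if err := dec.Decode(input); err != nil {
		return nil, err
	}
	return &out, nil
}
```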
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 0000000..47a99e5 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 0000000..13cc5e3 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1061 @@ +// Package mapstructure exposes functionality to convert an arbitrary +// map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. 
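DecodeHookFunc, declared next, is deliberately an empty interface so that both the Type-based and the older Kind-based signatures keep working. A sketch of a hand-written Type-based hook; the net.IP conversion is chosen purely for illustration and is not a hook this vendored copy ships:

```go
package example

import (
	"fmt"
	"net"
	"reflect"

	"github.com/mitchellh/mapstructure"
)

// stringToIPHook sees the full source and target types, not just their kinds,
// and converts strings such as "10.1.2.3" into net.IP values.
func stringToIPHook(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
	if from.Kind() != reflect.String || to != reflect.TypeOf(net.IP{}) {
		return data, nil
	}
	ip := net.ParseIP(data.(string))
	if ip == nil {
		return nil, fmt.Errorf("cannot parse %q as an IP address", data)
	}
	return ip, nil
}

// Compile-time check that the function satisfies the Type-based hook signature.
var _ mapstructure.DecodeHookFuncType = stringToIPHook
```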
+type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. 
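Decode, defined next, and its WeakDecode variant are the convenience entry points over that configuration. A sketch of the difference WeaklyTypedInput makes; the Person struct and the input values are assumptions:

```go
package example

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
	Age  int
}

func decodePerson() {
	input := map[string]interface{}{"name": "Mitchell", "age": "25"}

	var strict Person
	if err := mapstructure.Decode(input, &strict); err != nil {
		fmt.Println(err) // strict decoding rejects the string "25" for the int field
	}

	var weak Person
	if err := mapstructure.WeakDecode(input, &weak); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", weak) // {Name:Mitchell Age:25}
}
```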
+func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + inputVal := reflect.ValueOf(input) + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. 
+ var err error + input, err = DecodeHookExec( + d.config.DecodeHook, + inputVal.Type(), outVal.Type(), input) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + inputKind := getKind(outVal) + switch inputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, inputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + return d.decode(name, data, val.Elem()) + } + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) 
decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == 
reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap 
reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + tagParts := strings.Split(tagValue, ",") + + // Determine the name of the key in the map + keyName := f.Name + if tagParts[0] != "" { + if tagParts[0] == "-" { + continue + } + keyName = tagParts[0] + } + + // If "squash" is specified in the tag, we squash the field down. + squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + if squash && v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + err := d.decode(keyName, x.Interface(), vMap) + if err != nil { + return err + } + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + + // Make a new slice to hold our result, same size as the original data. + valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. 
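+ // (The new array has the fixed length of the target type; when the input
+ // is shorter, the trailing elements keep their zero values.)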
+ valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, structVal.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. 
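+ // For example, a map key "name" still matches a struct field called "Name"
+ // (or its tag value) here, since the comparison is case-insensitive.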
+ for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 0000000..c67dad6 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 0000000..003e99f --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,772 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
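+//
+// For example (illustrative), comparing a = {"x", "y", "z"} with
+// b = {"x", "z"} yields {0 0 1}, {2 1 1} and the dummy {3 2 0}.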
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
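+//
+// Passing a negative n selects the default of three lines of context.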
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
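+//
+// For sequences of lengths 3 and 5, for instance, this is simply
+// 2*min(3, 5)/(3+5) = 0.75, computed from the lengths of a and b alone.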
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
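+//
+// A minimal usage sketch (file names here are illustrative):
+//
+//    diff := UnifiedDiff{
+//        A:        SplitLines("one\ntwo\n"),
+//        B:        SplitLines("one\nthree\n"),
+//        FromFile: "before.txt",
+//        ToFile:   "after.txt",
+//        Context:  3,
+//    }
+//    text, _ := GetUnifiedDiffString(diff)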
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return string(w.Bytes()), err +} + +// Convert range to the "ed" format. +func formatRangeContext(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + if length <= 1 { + return fmt.Sprintf("%d", beginning) + } + return fmt.Sprintf("%d,%d", beginning, beginning+length-1) +} + +type ContextDiff UnifiedDiff + +// Compare two sequences of lines; generate the delta as a context diff. +// +// Context diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with *** or ---) are +// created with a trailing newline. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The context diff format normally has a header for filenames and +// modification times. Any or all of these may be specified using +// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +// If not specified, the strings default to blanks. 
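+//
+// Usage mirrors WriteUnifiedDiff above: a UnifiedDiff value can be converted
+// via ContextDiff(diff) and passed to GetContextDiffString to obtain the
+// result as a string.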
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + var diffErr error + wf := func(format string, args ...interface{}) { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + if diffErr == nil && err != nil { + diffErr = err + } + } + ws := func(s string) { + _, err := buf.WriteString(s) + if diffErr == nil && err != nil { + diffErr = err + } + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + prefix := map[byte]string{ + 'i': "+ ", + 'd': "- ", + 'r': "! ", + 'e': " ", + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } + } + + first, last := g[0], g[len(g)-1] + ws("***************" + diff.Eol) + + range1 := formatRangeContext(first.I1, last.I2) + wf("*** %s ****%s", range1, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'd' { + for _, cc := range g { + if cc.Tag == 'i' { + continue + } + for _, line := range diff.A[cc.I1:cc.I2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + + range2 := formatRangeContext(first.J1, last.J2) + wf("--- %s ----%s", range2, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'i' { + for _, cc := range g { + if cc.Tag == 'd' { + continue + } + for _, line := range diff.B[cc.J1:cc.J2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + } + return diffErr +} + +// Like WriteContextDiff but returns the diff a string. +func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/sendgrid/rest/CHANGELOG.md b/vendor/github.com/sendgrid/rest/CHANGELOG.md new file mode 100644 index 0000000..abd855c --- /dev/null +++ b/vendor/github.com/sendgrid/rest/CHANGELOG.md @@ -0,0 +1,58 @@ +# Change Log +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](http://semver.org/). + +## [2.4.1] - 2018-4-09 +### Fixed +- Pull #71, Solves #70 +- Fix Travis CI Build +- Special thanks to [Vasko Zdravevski](https://github.com/vaskoz) for the PR! + +## [2.4.0] - 2017-4-10 +### Added +- Pull #18, Solves #17 +- Add RestError Struct for an error handling +- Special thanks to [Takahiro Ikeuchi](https://github.com/iktakahiro) for the PR! + +## [2.3.1] - 2016-10-14 +### Changed +- Pull #15, solves Issue #7 +- Moved QueryParams processing into BuildRequestObject +- Special thanks to [Gábor Lipták](https://github.com/gliptak) for the PR! 
+ +## [2.3.0] - 2016-10-04 +### Added +- Pull [#10] [Allow for custom Content-Types](https://github.com/sendgrid/rest/issues/10) + +## [2.2.0] - 2016-07-28 +### Added +- Pull [#9](https://github.com/sendgrid/rest/pull/9): Allow for setting a custom HTTP client +- [Here](https://github.com/sendgrid/rest/blob/master/rest_test.go#L127) is an example of usage +- This enables usage of the [sendgrid-go library](https://github.com/sendgrid/sendgrid-go) on [Google App Engine (GAE)](https://cloud.google.com/appengine/) +- Special thanks to [Chris Broadfoot](https://github.com/broady) and [Sridhar Venkatakrishnan](https://github.com/sridharv) for providing code and feedback! + +## [2.1.0] - 2016-06-10 +### Added +- Automatically add Content-Type: application/json when there is a request body + +## [2.0.0] - 2016-06-03 +### Changed +- Made the Request and Response variables non-redundant. e.g. request.RequestBody becomes request.Body + +## [1.0.2] - 2016-04-07 +### Added +- these changes are thanks to [deckarep](https://github.com/deckarep). Thanks! +- more updates to error naming convention +- more error handing on HTTP request + +## [1.0.1] - 2016-04-07 +### Added +- these changes are thanks to [deckarep](https://github.com/deckarep). Thanks! +- update error naming convention +- explicitly define supported HTTP verbs +- better error handing on HTTP request + +## [1.0.0] - 2016-04-05 +### Added +- We are live! diff --git a/vendor/github.com/sendgrid/rest/CODE_OF_CONDUCT.md b/vendor/github.com/sendgrid/rest/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..b2439f6 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/CODE_OF_CONDUCT.md @@ -0,0 +1,41 @@ +# SendGrid Community Code of Conduct + +The SendGrid open source community is made up of members from around the globe with a diverse set of skills, personalities, and experiences. It is through these differences that our community experiences successes and continued growth. When you're working with members of the community, we encourage you to follow these guidelines, which help steer our interactions and strive to maintain a positive, successful and growing community. + +### Be Open +Members of the community are open to collaboration, whether it's on pull requests, code reviews, approvals, issues or otherwise. We're receptive to constructive comments and criticism, as the experiences and skill sets of all members contribute to the whole of our efforts. We're accepting of all who wish to take part in our activities, fostering an environment where anyone can participate, and everyone can make a difference. + +### Be Considerate +Members of the community are considerate of their peers, which include other contributors and users of SendGrid. We're thoughtful when addressing the efforts of others, keeping in mind that often the labor was completed with the intent of the good of the community. We're attentive in our communications, whether in person or online, and we're tactful when approaching differing views. + +### Be Respectful +Members of the community are respectful. We're respectful of others, their positions, their skills, their commitments and their efforts. We're respectful of the volunteer efforts that permeate the SendGrid community. We're respectful of the processes outlined in the community, and we work within them. When we disagree, we are courteous in raising our issues. Overall, we're good to each other. We contribute to this community not because we have to, but because we want to. 
If we remember that, these guidelines will come naturally. + +## Additional Guidance + +### Disclose Potential Conflicts of Interest +Community discussions often involve interested parties. We expect participants to be aware when they are conflicted due to employment or other projects they are involved in and disclose those interests to other project members. When in doubt, over-disclose. Perceived conflicts of interest are important to address so that the community’s decisions are credible even when unpopular, difficult or favorable to the interests of one group over another. + +### Interpretation +This Code is not exhaustive or complete. It is not a rulebook; it serves to distill our common understanding of a collaborative, shared environment and goals. We expect it to be followed in spirit as much as in the letter. When in doubt, try to abide by [SendGrid’s cultural values](https://sendgrid.com/blog/employee-engagement-the-4h-way) defined by our “4H’s”: Happy, Hungry, Humble and Honest. + +### Enforcement +Most members of the SendGrid community always comply with this Code, not because of the existence of this Code, but because they have long experience participating in open source communities where the conduct described above is normal and expected. However, failure to observe this Code may be grounds for suspension, reporting the user for abuse or changing permissions for outside contributors. + +## If you have concerns about someone’s conduct +**Initiate Direct Contact** - It is always appropriate to email a community member (if contact information is available), mention that you think their behavior was out of line, and (if necessary) point them to this Code. + +**Discuss Publicly** - Discussing publicly is always acceptable. Note, though, that approaching the person directly may be better, as it tends to make them less defensive, and it respects the time of other community members, so you probably want to try direct contact first. + +**Contact the Moderators** - You can reach the SendGrid moderators by emailing dx@sendgrid.com. + +## Submission to SendGrid Repositories +Finally, just a reminder, changes to the SendGrid repositories will only be accepted upon completion of the [SendGrid Contributor Agreement](https://cla.sendgrid.com). + +## Attribution + +SendGrid thanks the following, on which it draws for content and inspiration: + +* [Python Community Code of Conduct](https://www.python.org/psf/codeofconduct/) +* [Open Source Initiative General Code of Conduct](https://opensource.org/codeofconduct) +* [Apache Code of Conduct](https://www.apache.org/foundation/policies/conduct.html) diff --git a/vendor/github.com/sendgrid/rest/CONTRIBUTING.md b/vendor/github.com/sendgrid/rest/CONTRIBUTING.md new file mode 100644 index 0000000..9b66ba6 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/CONTRIBUTING.md @@ -0,0 +1,185 @@ +Hello! Thank you for choosing to help contribute to one of the SendGrid open source projects. There are many ways you can contribute and help is always welcome. We simply ask that you follow the following contribution policies. 
+ +- [CLAs and CCLAs](#cla) +- [Roadmap & Milestones](#roadmap) +- [Feature Request](#feature-request) +- [Submit a Bug Report](#submit-a-bug-report) +- [Improvements to the Codebase](#improvements-to-the-codebase) +- [Understanding the Code Base](#understanding-the-codebase) +- [Testing](#testing) +- [Style Guidelines & Naming Conventions](#style-guidelines-and-naming-conventions) +- [Creating a Pull Request](#creating-a-pull-request) + + +We use [Milestones](https://github.com/sendgrid/rest/milestones) to help define current roadmaps, please feel free to grab an issue from the current milestone. Please indicate that you have begun work on it to avoid collisions. Once a PR is made, community review, comments, suggestions and additional PRs are welcomed and encouraged. + + +## CLAs and CCLAs + +Before you get started, SendGrid requires that a SendGrid Contributor License Agreement (CLA) be filled out by every contributor to a SendGrid open source project. + +Our goal with the CLA is to clarify the rights of our contributors and reduce other risks arising from inappropriate contributions. The CLA also clarifies the rights SendGrid holds in each contribution and helps to avoid misunderstandings over what rights each contributor is required to grant to SendGrid when making a contribution. In this way the CLA encourages broad participation by our open source community and helps us build strong open source projects, free from any individual contributor withholding or revoking rights to any contribution. + +SendGrid does not merge a pull request made against a SendGrid open source project until that pull request is associated with a signed CLA. Copies of the CLA are available [here](https://gist.github.com/SendGridDX/98b42c0a5d500058357b80278fde3be8#file-sendgrid-cla). + +When you create a Pull Request, after a few seconds, a comment will appear with a link to the CLA. Click the link and fill out the brief form and then click the "I agree" button and you are all set. You will not be asked to re-sign the CLA unless we make a change. + +There are a few ways to contribute, which we'll enumerate below: + + +## Feature Request + +If you'd like to make a feature request, please read this section. + +The GitHub issue tracker is the preferred channel for library feature requests, but please respect the following restrictions: + +- Please **search for existing issues** in order to ensure we don't have duplicate bugs/feature requests. +- Please be respectful and considerate of others when commenting on issues + + +## Submit a Bug Report + +Note: DO NOT include your credentials in ANY code examples, descriptions, or media you make public. + +A software bug is a demonstrable issue in the code base. In order for us to diagnose the issue and respond as quickly as possible, please add as much detail as possible into your bug report. + +Before you decide to create a new issue, please try the following: + +1. Check the Github issues tab if the identified issue has already been reported, if so, please add a +1 to the existing post. +2. Update to the latest version of this code and check if issue has already been fixed +3. Copy and fill in the Bug Report Template we have provided below + +### Please use our Bug Report Template + +In order to make the process easier, we've included a [sample bug report template](https://github.com/sendgrid/rest/.github/ISSUE_TEMPLATE) (borrowed from [Ghost](https://github.com/TryGhost/Ghost/)). 
The template uses [GitHub flavored markdown](https://help.github.com/articles/github-flavored-markdown/) for formatting. + + +## Improvements to the Codebase + +We welcome direct contributions to the rest code base. Thank you! + +### Development Environment ### + +#### Install and Run Locally #### + +##### Prerequisites ##### + +- Go version 1.6 + +##### Initial setup: ##### + +```bash +git clone https://github.com/sendgrid/rest.git +cd rest +``` + +##### Execute: ##### + +See the [examples folder](https://github.com/sendgrid/rest/tree/master/examples) to get started quickly. + +If you want to try the SendGrid example: + +First, get your free SendGrid account [here](https://sendgrid.com/free?source=rest). + +You will need to setup the following environment to use the SendGrid example: + +``` +echo "export SENDGRID-API-KEY='YOUR-API-KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +go run examples/example.go +``` + + +## Understanding the Code Base + +**/examples** + +Working examples that demonstrate usage. + +**rest.go** + +There is a struct to hold both the request and response to the API server. + +The main function that does the heavy lifting (and external entry point) is `API`. + + +## Testing + +All PRs require passing tests before the PR will be reviewed. + +All test files are in [`rest-test.go`](https://github.com/sendgrid/rest/blob/master/rest_test.go). + +For the purposes of contributing to this repo, please update the [`rest-test.go`](https://github.com/sendgrid/rest/blob/master/rest_test.go) file with unit tests as you modify the code. + +Run the test: + +```bash +go test -v +``` + + +## Style Guidelines & Naming Conventions + +Generally, we follow the style guidelines as suggested by the official language. However, we ask that you conform to the styles that already exist in the library. If you wish to deviate, please explain your reasoning. + +- [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) + +Please run your code through: + +- [fmt](https://blog.golang.org/go-fmt-your-code) + +## Creating a Pull Request + +1. [Fork](https://help.github.com/fork-a-repo/) the project, clone your fork, + and configure the remotes: + + ```bash + # Clone your fork of the repo into the current directory + git clone https://github.com/sendgrid/rest + # Navigate to the newly cloned directory + cd rest + # Assign the original repo to a remote called "upstream" + git remote add upstream https://github.com/sendgrid/rest + ``` + +2. If you cloned a while ago, get the latest changes from upstream: + + ```bash + git checkout + git pull upstream + ``` + +3. Create a new topic branch (off the main project development branch) to + contain your feature, change, or fix: + + ```bash + git checkout -b + ``` + +4. Commit your changes in logical chunks. Please adhere to these [git commit + message guidelines](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) + or your code is unlikely be merged into the main project. Use Git's + [interactive rebase](https://help.github.com/articles/interactive-rebase) + feature to tidy up your commits before making them public. + +4a. Create tests. + +4b. Create or update the example code that demonstrates the functionality of this change to the code. + +5. Locally merge (or rebase) the upstream development branch into your topic branch: + + ```bash + git pull [--rebase] upstream master + ``` + +6. Push your topic branch up to your fork: + + ```bash + git push origin + ``` + +7. 
[Open a Pull Request](https://help.github.com/articles/using-pull-requests/) + with a clear title and description against the `master` branch. All tests must be passing before we will review the PR. + +If you have any additional questions, please feel free to [email](mailto:dx@sendgrid.com) us or create an issue in this repo. diff --git a/vendor/github.com/sendgrid/rest/LICENSE.txt b/vendor/github.com/sendgrid/rest/LICENSE.txt new file mode 100644 index 0000000..4e9ed16 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016-2018 SendGrid, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sendgrid/rest/README.md b/vendor/github.com/sendgrid/rest/README.md new file mode 100644 index 0000000..c75e582 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/README.md @@ -0,0 +1,162 @@ +![SendGrid Logo](https://uiux.s3.amazonaws.com/2016-logos/email-logo%402x.png) + +[![Build Status](https://travis-ci.org/sendgrid/rest.svg?branch=master)](https://travis-ci.org/sendgrid/rest) +[![GoDoc](https://godoc.org/github.com/sendgrid/rest?status.png)](http://godoc.org/github.com/sendgrid/rest) +[![Go Report Card](https://goreportcard.com/badge/github.com/sendgrid/rest)](https://goreportcard.com/report/github.com/sendgrid/rest) +[![Email Notifications Badge](https://dx.sendgrid.com/badge/go)](https://dx.sendgrid.com/newsletter/go) +[![Twitter Follow](https://img.shields.io/twitter/follow/sendgrid.svg?style=social&label=Follow)](https://twitter.com/sendgrid) +[![GitHub contributors](https://img.shields.io/github/contributors/sendgrid/rest.svg)](https://github.com/sendgrid/rest/graphs/contributors) +[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE.txt) + +**Quickly and easily access any RESTful or RESTful-like API.** + +If you are looking for the SendGrid API client library, please see [this repo](https://github.com/sendgrid/sendgrid-go). + +# Announcements + +All updates to this library is documented in our [CHANGELOG](https://github.com/sendgrid/rest/blob/master/CHANGELOG.md). 
+ +# Table of Contents +- [Installation](#installation) +- [Quick Start](#quick-start) +- [Usage](#usage) +- [Roadmap](#roadmap) +- [How to Contribute](#contribute) +- [About](#about) +- [License](#license) + + +# Installation + +## Prerequisites + +- Go version 1.6.X, 1.7.X, 1.8.X, 1.9.X or 1.10.X + +## Install Package + +```bash +go get github.com/sendgrid/rest +``` + +## Setup Environment Variables + +### Initial Setup + +```bash +cp .env_sample .env +``` + +### Environment Variable + +Update the development environment with your [SENDGRID_API_KEY](https://app.sendgrid.com/settings/api_keys), for example: + +```bash +echo "export SENDGRID_API_KEY='YOUR_API_KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +``` + + +# Quick Start + +`GET /your/api/{param}/call` + +```go +package main + +import "github.com/sendgrid/rest" +import "fmt" + +func main() { + const host = "https://api.example.com" + param := "myparam" + endpoint := "/your/api/" + param + "/call" + baseURL := host + endpoint + method := rest.Get + request := rest.Request{ + Method: method, + BaseURL: baseURL, + } + response, err := rest.Send(request) + if err != nil { + fmt.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +`POST /your/api/{param}/call` with headers, query parameters and a request body. + +```go +package main + +import "github.com/sendgrid/rest" +import "fmt" + +func main() { + const host = "https://api.example.com" + param := "myparam" + endpoint := "/your/api/" + param + "/call" + baseURL := host + endpoint + Headers := make(map[string]string) + key := os.Getenv("API_KEY") + Headers["Authorization"] = "Bearer " + key + Headers["X-Test"] = "Test" + var Body = []byte(`{"some": 0, "awesome": 1, "data": 3}`) + queryParams := make(map[string]string) + queryParams["hello"] = "0" + queryParams["world"] = "1" + method := rest.Post + request = rest.Request{ + Method: method, + BaseURL: baseURL, + Headers: Headers, + QueryParams: queryParams, + Body: Body, + } + response, err := rest.Send(request) + if err != nil { + fmt.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + +# Usage + +- [Usage Examples](USAGE.md) + + +# Roadmap + +If you are interested in the future direction of this project, please take a look at our [milestones](https://github.com/sendgrid/rest/milestones). We would love to hear your feedback. + + +# How to Contribute + +We encourage contribution to our projects, please see our [CONTRIBUTING](https://github.com/sendgrid/rest/blob/master/CONTRIBUTING.md) guide for details. + +Quick links: + +- [Feature Request](https://github.com/sendgrid/rest/blob/master/CONTRIBUTING.md#feature-request) +- [Bug Reports](https://github.com/sendgrid/rest/blob/master/CONTRIBUTING.md#submit-a-bug-report) +- [Sign the CLA to Create a Pull Request](https://github.com/sendgrid/rest/blob/master/CONTRIBUTING.md#cla) +- [Improvements to the Codebase](https://github.com/sendgrid/rest/blob/master/CONTRIBUTING.md#improvements-to-the-codebase) + + +# About + +rest is guided and supported by the SendGrid [Developer Experience Team](mailto:dx@sendgrid.com). + +rest is maintained and funded by SendGrid, Inc. The names and logos for rest are trademarks of SendGrid, Inc. 
+ + +# License +[The MIT License (MIT)](LICENSE.txt) diff --git a/vendor/github.com/sendgrid/rest/TROUBLESHOOTING.md b/vendor/github.com/sendgrid/rest/TROUBLESHOOTING.md new file mode 100644 index 0000000..2ba74e4 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/TROUBLESHOOTING.md @@ -0,0 +1,62 @@ +## Table of Contents + +* [Viewing the Request Body](#request-body) + + + +## Viewing the Request Body + +When debugging or testing, it may be useful to exampine the raw request body to compare against the [documented format](https://sendgrid.com/docs/API_Reference/api_v3.html). + +Example Code +```go +package main + +import "github.com/sendgrid/rest" +import "fmt" + +func main() { + const host = "https://api.example.com" + param := "myparam" + endpoint := "/your/api/" + param + "/call" + baseURL := host + endpoint + Headers := make(map[string]string) + key := os.Getenv("API_KEY") + Headers["Authorization"] = "Bearer " + key + Headers["X-Test"] = "Test" + var Body = []byte(`{"some": 0, "awesome": 1, "data": 3}`) + queryParams := make(map[string]string) + queryParams["hello"] = "0" + queryParams["world"] = "1" + method := rest.Post + request = rest.Request{ + Method: method, + BaseURL: baseURL, + Headers: Headers, + QueryParams: queryParams, + Body: Body, + } + response, err := rest.API(request) + if err != nil { + fmt.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +You can do this right before you call +`response, err := rest.API(request)` like so: + +```go +fmt.Printf("Request Body: %v \n", string(request.Body)) + +req, e := BuildRequestObject(request) +requestDump, err := httputil.DumpRequest(req, true) +if err != nil { + t.Errorf("Error : %v", err) +} +fmt.Printf("Request : %v \n", string(requestDump)) +``` \ No newline at end of file diff --git a/vendor/github.com/sendgrid/rest/USAGE.md b/vendor/github.com/sendgrid/rest/USAGE.md new file mode 100644 index 0000000..40def71 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/USAGE.md @@ -0,0 +1,211 @@ +# Usage + +Usage examples for SendGrid REST library + +## Initialization + +```go +package main + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/sendgrid/rest" +) + +// Build the URL +const host = "https://api.sendgrid.com" +endpoint := "/v3/api_keys" +baseURL := host + endpoint + +// Build the request headers +key := os.Getenv("SENDGRID_API_KEY") +Headers := make(map[string]string) +Headers["Authorization"] = "Bearer " + key +``` + +## Table of Contents + +- [GET](#get) +- [DELETE](#delete) +- [POST](#post) +- [PUT](#put) +- [PATCH](#patch) + + +## GET + +#### GET Single + +```go +method = rest.Get + +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL + "/" + apiKey, + Headers: Headers, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +#### GET Collection + +```go +method := rest.Get + +// Build the query parameters +queryParams := make(map[string]string) +queryParams["limit"] = "100" +queryParams["offset"] = "0" + +// Make the API call +request := rest.Request{ + Method: method, + BaseURL: baseURL, + Headers: Headers, + QueryParams: queryParams, +} +response, err := rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## DELETE + +```go +method = rest.Delete 
+ +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL + "/" + apiKey, + Headers: Headers, + QueryParams: queryParams, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Headers) +} +``` + + +## POST + +```go +method = rest.Post + +// Build the request body +var Body = []byte(`{ + "name": "My API Key", + "scopes": [ + "mail.send", + "alerts.create", + "alerts.read" + ] +}`) + +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL, + Headers: Headers, + QueryParams: queryParams, + Body: Body, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} + +// Get a particular return value. +// Note that you can unmarshall into a struct if +// you know the JSON structure in advance. +b := []byte(response.Body) +var f interface{} +err = json.Unmarshal(b, &f) +if err != nil { + fmt.Println(err) +} +m := f.(map[string]interface{}) +apiKey := m["api_key_id"].(string) +``` + +## PUT + +```go +method = rest.Put + +// Build the request body +Body = []byte(`{ + "name": "A New Hope", + "scopes": [ + "user.profile.read", + "user.profile.update" + ] +}`) + +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL + "/" + apiKey, + Headers: Headers, + Body: Body, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## PATCH + +```go +method = rest.Patch + +// Build the request body +Body = []byte(`{ + "name": "A New Hope" +}`) + +// Make the API call +request = rest.Request{ + Method: method, + BaseURL: baseURL + "/" + apiKey, + Headers: Headers, + Body: Body, +} +response, err = rest.API(request) +if err != nil { + fmt.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` \ No newline at end of file diff --git a/vendor/github.com/sendgrid/rest/rest.go b/vendor/github.com/sendgrid/rest/rest.go new file mode 100644 index 0000000..46b6157 --- /dev/null +++ b/vendor/github.com/sendgrid/rest/rest.go @@ -0,0 +1,145 @@ +// Package rest allows for quick and easy access any REST or REST-like API. +package rest + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/url" +) + +// Method contains the supported HTTP verbs. +type Method string + +// Supported HTTP verbs. +const ( + Get Method = "GET" + Post Method = "POST" + Put Method = "PUT" + Patch Method = "PATCH" + Delete Method = "DELETE" +) + +// Request holds the request to an API Call. +type Request struct { + Method Method + BaseURL string // e.g. https://api.sendgrid.com + Headers map[string]string + QueryParams map[string]string + Body []byte +} + +// RestError is a struct for an error handling. +type RestError struct { + Response *Response +} + +// Error is the implementation of the error interface. +func (e *RestError) Error() string { + return e.Response.Body +} + +// DefaultClient is used if no custom HTTP client is defined +var DefaultClient = &Client{HTTPClient: http.DefaultClient} + +// Client allows modification of client headers, redirect policy +// and other settings +// See https://golang.org/pkg/net/http +type Client struct { + HTTPClient *http.Client +} + +// Response holds the response from an API call. 
+type Response struct { + StatusCode int // e.g. 200 + Body string // e.g. {"result: success"} + Headers map[string][]string // e.g. map[X-Ratelimit-Limit:[600]] +} + +// AddQueryParameters adds query parameters to the URL. +func AddQueryParameters(baseURL string, queryParams map[string]string) string { + baseURL += "?" + params := url.Values{} + for key, value := range queryParams { + params.Add(key, value) + } + return baseURL + params.Encode() +} + +// BuildRequestObject creates the HTTP request object. +func BuildRequestObject(request Request) (*http.Request, error) { + // Add any query parameters to the URL. + if len(request.QueryParams) != 0 { + request.BaseURL = AddQueryParameters(request.BaseURL, request.QueryParams) + } + req, err := http.NewRequest(string(request.Method), request.BaseURL, bytes.NewBuffer(request.Body)) + if err != nil { + return req, err + } + for key, value := range request.Headers { + req.Header.Set(key, value) + } + _, exists := req.Header["Content-Type"] + if len(request.Body) > 0 && !exists { + req.Header.Set("Content-Type", "application/json") + } + return req, err +} + +// MakeRequest makes the API call. +func MakeRequest(req *http.Request) (*http.Response, error) { + return DefaultClient.HTTPClient.Do(req) +} + +// BuildResponse builds the response struct. +func BuildResponse(res *http.Response) (*Response, error) { + body, err := ioutil.ReadAll(res.Body) + response := Response{ + StatusCode: res.StatusCode, + Body: string(body), + Headers: res.Header, + } + res.Body.Close() // nolint + return &response, err +} + +// API supports old implementation (deprecated) +func API(request Request) (*Response, error) { + return Send(request) +} + +// Send uses the DefaultClient to send your request +func Send(request Request) (*Response, error) { + return DefaultClient.Send(request) +} + +// The following functions enable the ability to define a +// custom HTTP Client + +// MakeRequest makes the API call. +func (c *Client) MakeRequest(req *http.Request) (*http.Response, error) { + return c.HTTPClient.Do(req) +} + +// API supports old implementation (deprecated) +func (c *Client) API(request Request) (*Response, error) { + return c.Send(request) +} + +// Send will build your request, make the request, and build your response. +func (c *Client) Send(request Request) (*Response, error) { + // Build the HTTP request object. + req, err := BuildRequestObject(request) + if err != nil { + return nil, err + } + + // Build the HTTP client and make the request. + res, err := c.MakeRequest(req) + if err != nil { + return nil, err + } + + // Build Response object. + return BuildResponse(res) +} diff --git a/vendor/github.com/sendgrid/sendgrid-go/CHANGELOG.md b/vendor/github.com/sendgrid/sendgrid-go/CHANGELOG.md new file mode 100644 index 0000000..44b29f9 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/CHANGELOG.md @@ -0,0 +1,97 @@ +# Change Log +All notable changes to this project will be documented in this file. + +## [3.4.1] - 2017-07-03 +### Added +- [Pull #116](https://github.com/sendgrid/sendgrid-go/pull/116): Fixing mimetypes in the NewSingleEmail function +- Big thanks to [Depado](https://github.com/Depado) for the pull request! + +## [3.4.0] - 2017-06-14 +### Added +- [Pull #96](https://github.com/sendgrid/sendgrid-go/pull/96): Send a Single Email to a Single Recipient +- Big thanks to [Oranagwa Osmond](https://github.com/andela-ooranagwa) for the pull request! 
+ +## [3.3.1] - 2016-10-18 +### Fixed +- [Pull #95](https://github.com/sendgrid/sendgrid-go/pull/95): Use log instead of fmt for printing errors +- Big thanks to [Gábor Lipták](https://github.com/gliptak) for the pull request! + +## [3.3.0] - 2016-10-10 +### Added +- [Pull #92](https://github.com/sendgrid/sendgrid-go/pull/92): Inbound Parse Webhook support +- Checkout the [README](https://github.com/sendgrid/sendgrid-go/tree/master/helpers/inbound) for details. + +## [3.2.3] - 2016-10-10 +### Added +- [Pull #91](https://github.com/sendgrid/sendgrid-go/pull/91): Simplified code in mail helper +- Big thanks to [Roberto Ortega](https://github.com/berto) for the pull request! + +## [3.2.2] - 2016-09-08 +### Added +- Merged pull request: [update prismPath and update prism binary](https://github.com/sendgrid/sendgrid-go/pull/80) +- Special thanks to [Tom Pytleski](https://github.com/pytlesk4) for the pull request! + +## [3.2.1] - 2016-08-24 +### Added +- Table of Contents in the README +- Added a [USE_CASES.md](https://github.com/sendgrid/sendgrid-go/blob/master/USE_CASES.md) section, with the first use case example for transactional templates + +## [3.2.0] - 2016-08-17 +### Added +- Merged pull request: [make contents var args in NewV3MailInit](https://github.com/sendgrid/sendgrid-go/pull/75) +- The `NewV3MailInit` [Mail Helper](https://github.com/sendgrid/sendgrid-go/tree/master/helpers/mail) constructor can now take in multiple content objects. +- Thanks to [Adrien Delorme](https://github.com/azr) for the pull request! + +## [3.1.0] - 2016-07-28 +- Dependency update to v2.2.0 of [sendGrid-rest](https://github.com/sendgrid/rest/releases/tag/v2.2.0) +- Pull [#9](https://github.com/sendgrid/rest/pull/9): Allow for setting a custom HTTP client +- [Here](https://github.com/sendgrid/rest/blob/master/rest_test.go#L127) is an example of usage +- This enables usage of the [sendgrid-go library](https://github.com/sendgrid/sendgrid-go) on [Google App Engine (GAE)](https://cloud.google.com/appengine/) +- Special thanks to [Chris Broadfoot](https://github.com/broady) and [Sridhar Venkatakrishnan](https://github.com/sridharv) for providing code and feedback! + +## [3.0.6] - 2016-07-26 ## +### Added +- [Troubleshooting](https://github.com/sendgrid/sendgrid-go/blob/master/TROUBLESHOOTING.md) section + +## [3.0.5] - 2016-07-20 +### Added +- README updates +- Update introduction blurb to include information regarding our forward path +- Update the v3 /mail/send example to include non-helper usage +- Update the generic v3 example to include non-fluent interface usage + +## [3.0.4] - 2016-07-12 +### Added +- Update docs, unit tests and examples to include Sender ID +### Fixed +- Missing example query params for the examples + +## [3.0.3] - 2016-07-08 +### Fixed +- [Can't disable subscription tracking #68](https://github.com/sendgrid/sendgrid-go/issues/68) + +## [3.0.2] - 2016-07-07 +### Added +- Tests now mocked automatically against [prism](https://stoplight.io/prism/) + +## [3.0.1] - 2016-07-05 +### Added +- Accept: application/json header per https://sendgrid.com/docs/API_Reference/Web_API_v3/How_To_Use_The_Web_API_v3/requests.html + +### Updated +- Content based on our updated [Swagger/OAI doc](https://github.com/sendgrid/sendgrid-oai) + +## [3.0.0] - 2016-06-14 +### Added +- Breaking change to support the v3 Web API +- New HTTP client +- v3 Mail Send helper + +## [2.0.0] - 2015-05-02 +### Changed +- Fixed a nasty bug with orphaned connections but drops support for Go versions < 1.3. 
Thanks [trinchan](https://github.com/sendgrid/sendgrid-go/pull/24) + +## [1.2.0] - 2015-04-27 +### Added +- Support for API keys + diff --git a/vendor/github.com/sendgrid/sendgrid-go/CODE_OF_CONDUCT.md b/vendor/github.com/sendgrid/sendgrid-go/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..b2439f6 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/CODE_OF_CONDUCT.md @@ -0,0 +1,41 @@ +# SendGrid Community Code of Conduct + +The SendGrid open source community is made up of members from around the globe with a diverse set of skills, personalities, and experiences. It is through these differences that our community experiences successes and continued growth. When you're working with members of the community, we encourage you to follow these guidelines, which help steer our interactions and strive to maintain a positive, successful and growing community. + +### Be Open +Members of the community are open to collaboration, whether it's on pull requests, code reviews, approvals, issues or otherwise. We're receptive to constructive comments and criticism, as the experiences and skill sets of all members contribute to the whole of our efforts. We're accepting of all who wish to take part in our activities, fostering an environment where anyone can participate, and everyone can make a difference. + +### Be Considerate +Members of the community are considerate of their peers, which include other contributors and users of SendGrid. We're thoughtful when addressing the efforts of others, keeping in mind that often the labor was completed with the intent of the good of the community. We're attentive in our communications, whether in person or online, and we're tactful when approaching differing views. + +### Be Respectful +Members of the community are respectful. We're respectful of others, their positions, their skills, their commitments and their efforts. We're respectful of the volunteer efforts that permeate the SendGrid community. We're respectful of the processes outlined in the community, and we work within them. When we disagree, we are courteous in raising our issues. Overall, we're good to each other. We contribute to this community not because we have to, but because we want to. If we remember that, these guidelines will come naturally. + +## Additional Guidance + +### Disclose Potential Conflicts of Interest +Community discussions often involve interested parties. We expect participants to be aware when they are conflicted due to employment or other projects they are involved in and disclose those interests to other project members. When in doubt, over-disclose. Perceived conflicts of interest are important to address so that the community’s decisions are credible even when unpopular, difficult or favorable to the interests of one group over another. + +### Interpretation +This Code is not exhaustive or complete. It is not a rulebook; it serves to distill our common understanding of a collaborative, shared environment and goals. We expect it to be followed in spirit as much as in the letter. When in doubt, try to abide by [SendGrid’s cultural values](https://sendgrid.com/blog/employee-engagement-the-4h-way) defined by our “4H’s”: Happy, Hungry, Humble and Honest. + +### Enforcement +Most members of the SendGrid community always comply with this Code, not because of the existence of this Code, but because they have long experience participating in open source communities where the conduct described above is normal and expected. 
However, failure to observe this Code may be grounds for suspension, reporting the user for abuse or changing permissions for outside contributors. + +## If you have concerns about someone’s conduct +**Initiate Direct Contact** - It is always appropriate to email a community member (if contact information is available), mention that you think their behavior was out of line, and (if necessary) point them to this Code. + +**Discuss Publicly** - Discussing publicly is always acceptable. Note, though, that approaching the person directly may be better, as it tends to make them less defensive, and it respects the time of other community members, so you probably want to try direct contact first. + +**Contact the Moderators** - You can reach the SendGrid moderators by emailing dx@sendgrid.com. + +## Submission to SendGrid Repositories +Finally, just a reminder, changes to the SendGrid repositories will only be accepted upon completion of the [SendGrid Contributor Agreement](https://cla.sendgrid.com). + +## Attribution + +SendGrid thanks the following, on which it draws for content and inspiration: + +* [Python Community Code of Conduct](https://www.python.org/psf/codeofconduct/) +* [Open Source Initiative General Code of Conduct](https://opensource.org/codeofconduct) +* [Apache Code of Conduct](https://www.apache.org/foundation/policies/conduct.html) diff --git a/vendor/github.com/sendgrid/sendgrid-go/CONTRIBUTING.md b/vendor/github.com/sendgrid/sendgrid-go/CONTRIBUTING.md new file mode 100644 index 0000000..342e5e3 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/CONTRIBUTING.md @@ -0,0 +1,192 @@ +Hello! Thank you for choosing to help contribute to one of the SendGrid open source libraries. There are many ways you can contribute and help is always welcome. We simply ask that you follow the following contribution policies. + +- [CLAs and CCLAs](#cla) +- [Roadmap & Milestones](#roadmap) +- [Feature Request](#feature-request) +- [Submit a Bug Report](#submit-a-bug-report) +- [Improvements to the Codebase](#improvements-to-the-codebase) +- [Understanding the Code Base](#understanding-the-codebase) +- [Testing](#testing) +- [Style Guidelines & Naming Conventions](#style-guidelines-and-naming-conventions) +- [Creating a Pull Request](#creating-a-pull-request) + + +We use [Milestones](https://github.com/sendgrid/sendgrid-go/milestones) to help define current roadmaps, please feel free to grab an issue from the current milestone. Please indicate that you have begun work on it to avoid collisions. Once a PR is made, community review, comments, suggestions and additional PRs are welcomed and encouraged. + + +## CLAs and CCLAs + +Before you get started, SendGrid requires that a SendGrid Contributor License Agreement (CLA) be filled out by every contributor to a SendGrid open source project. + +Our goal with the CLA is to clarify the rights of our contributors and reduce other risks arising from inappropriate contributions. The CLA also clarifies the rights SendGrid holds in each contribution and helps to avoid misunderstandings over what rights each contributor is required to grant to SendGrid when making a contribution. In this way the CLA encourages broad participation by our open source community and helps us build strong open source projects, free from any individual contributor withholding or revoking rights to any contribution. + +SendGrid does not merge a pull request made against a SendGrid open source project until that pull request is associated with a signed CLA. 
Copies of the CLA are available [here](https://gist.github.com/SendGridDX/98b42c0a5d500058357b80278fde3be8#file-sendgrid_cla). + +When you create a Pull Request, after a few seconds, a comment will appear with a link to the CLA. Click the link and fill out the brief form and then click the "I agree" button and you are all set. You will not be asked to re-sign the CLA unless we make a change. + +There are a few ways to contribute, which we'll enumerate below: + + +## Feature Request + +If you'd like to make a feature request, please read this section. + +The GitHub issue tracker is the preferred channel for library feature requests, but please respect the following restrictions: + +- Please **search for existing issues** in order to ensure we don't have duplicate bugs/feature requests. +- Please be respectful and considerate of others when commenting on issues + + +## Submit a Bug Report + +Note: DO NOT include your credentials in ANY code examples, descriptions, or media you make public. + +A software bug is a demonstrable issue in the code base. In order for us to diagnose the issue and respond as quickly as possible, please add as much detail as possible into your bug report. + +Before you decide to create a new issue, please try the following: + +1. Check the Github issues tab if the identified issue has already been reported, if so, please add a +1 to the existing post. +2. Update to the latest version of this code and check if issue has already been fixed +3. Copy and fill in the Bug Report Template we have provided below + +### Please use our Bug Report Template + +In order to make the process easier, we've included a [sample bug report template](https://github.com/sendgrid/sendgrid-go/.github/ISSUE_TEMPLATE) (borrowed from [Ghost](https://github.com/TryGhost/Ghost/)). The template uses [GitHub flavored markdown](https://help.github.com/articles/github-flavored-markdown/) for formatting. + + +## Improvements to the Codebase + +We welcome direct contributions to the sendgrid-go code base. Thank you! + +### Development Environment ### + +#### Install and Run Locally #### + +##### Prerequisites ##### + +- Go 1.6 +- [rest](https://github.com/sendgrid/rest) + +##### Initial setup: ##### + +```bash +git clone https://github.com/sendgrid/sendgrid-go.git +cd sendgrid-go +``` + +## Environment Variables + +First, get your free SendGrid account [here](https://sendgrid.com/free?source=sendgrid-go). + +Next, update your environment with your [SENDGRID_API_KEY](https://app.sendgrid.com/settings/api_keys). + +```bash +echo "export SENDGRID_API_KEY='YOUR_API_KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +``` + +##### Execute: ##### + +* Check out the documentation for [Web API v3 endpoints](https://sendgrid.com/docs/API_Reference/Web_API_v3/index.html). +* Review the corresponding [example](https://github.com/sendgrid/sendgrid-go/blob/master/examples). +* Update the file + +```bash +go run +``` + + +## Understanding the Code Base + +**/examples** + +Working examples that demonstrate usage. + +**sendgrid.go** + +The main function that does the heavy lifting (and external entry point) is `API`. + + +## Testing + +All PRs require passing tests before the PR will be reviewed. + +All test files are in [`sendgrid_test.go`](https://github.com/sendgrid/sendgrid-go/tree/master/sendgrid_test.go). 
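+
+For illustration only, a new test added to `sendgrid_test.go` might look like the sketch below. The test name is hypothetical, and the assertions assume that `GetRequest` joins `host + endpoint` into `BaseURL` and sets a `Bearer` Authorization header; adapt it to whatever behavior your change actually touches.
+
+```go
+package sendgrid
+
+import "testing"
+
+// TestGetRequestSetsAuth is a hypothetical example test: it checks that
+// GetRequest combines host and endpoint into BaseURL and derives the
+// Authorization header from the API key.
+func TestGetRequestSetsAuth(t *testing.T) {
+    request := GetRequest("SG.FAKE_KEY", "/v3/api_keys", "https://api.sendgrid.com")
+    if request.BaseURL != "https://api.sendgrid.com/v3/api_keys" {
+        t.Errorf("unexpected BaseURL: %s", request.BaseURL)
+    }
+    if request.Headers["Authorization"] != "Bearer SG.FAKE_KEY" {
+        t.Errorf("unexpected Authorization header: %s", request.Headers["Authorization"])
+    }
+}
+```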
+ +For the purposes of contributing to this repo, please update the [`sendgrid_test.go`](https://github.com/sendgrid/sendgrid-go/tree/master/sendgrid_test.go) file with unit tests as you modify the code. + +To run the tests: + +```bash +go test -v ./... +``` + + +## Style Guidelines & Naming Conventions + +Generally, we follow the style guidelines as suggested by the official language. However, we ask that you conform to the styles that already exist in the library. If you wish to deviate, please explain your reasoning. + +- [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) + +Please run your code through: + +- [fmt](https://blog.golang.org/go-fmt-your-code) + + +## Creating a Pull Request + +1. [Fork](https://help.github.com/fork-a-repo/) the project, clone your fork, + and configure the remotes: + + ```bash + # Clone your fork of the repo into the current directory + git clone https://github.com/sendgrid/sendgrid-go + + # Navigate to the newly cloned directory + cd sendgrid-go + + # Assign the original repo to a remote called "upstream" + git remote add upstream https://github.com/sendgrid/sendgrid-go + ``` + +2. If you cloned a while ago, get the latest changes from upstream: + + ```bash + git checkout + git pull upstream + ``` + +3. Create a new topic branch (off the main project development branch) to + contain your feature, change, or fix: + + ```bash + git checkout -b + ``` + +4. Commit your changes in logical chunks. Please adhere to these [git commit + message guidelines](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) + or your code is unlikely be merged into the main project. Use Git's + [interactive rebase](https://help.github.com/articles/interactive-rebase) + feature to tidy up your commits before making them public. + +4a. Create tests. + +4b. Create or update the example code that demonstrates the functionality of this change to the code. + +5. Locally merge (or rebase) the upstream development branch into your topic branch: + + ```bash + git pull [--rebase] upstream master + ``` + +6. Push your topic branch up to your fork: + + ```bash + git push origin + ``` + +7. [Open a Pull Request](https://help.github.com/articles/using-pull-requests/) + with a clear title and description against the `master` branch. All tests must be passing before we will review the PR. + +If you have any additional questions, please feel free to [email](mailto:dx@sendgrid.com) us or create an issue in this repo. diff --git a/vendor/github.com/sendgrid/sendgrid-go/LICENSE.txt b/vendor/github.com/sendgrid/sendgrid-go/LICENSE.txt new file mode 100644 index 0000000..7756fd6 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013-2018 SendGrid, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sendgrid/sendgrid-go/README.md b/vendor/github.com/sendgrid/sendgrid-go/README.md new file mode 100644 index 0000000..74aabc2 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/README.md @@ -0,0 +1,259 @@ +![SendGrid Logo](https://uiux.s3.amazonaws.com/2016-logos/email-logo%402x.png) + +[![BuildStatus](https://travis-ci.org/sendgrid/sendgrid-go.svg?branch=master)](https://travis-ci.org/sendgrid/sendgrid-go) +[![Email Notifications Badge](https://dx.sendgrid.com/badge/go)](https://dx.sendgrid.com/newsletter/go) +[![Go Report Card](https://goreportcard.com/badge/github.com/sendgrid/sendgrid-go)](https://goreportcard.com/report/github.com/sendgrid/sendgrid-go) +[![GoDoc](https://godoc.org/github.com/sendgrid/sendgrid-go?status.svg)](https://godoc.org/github.com/sendgrid/sendgrid-go) +[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE.txt) +[![Twitter Follow](https://img.shields.io/twitter/follow/sendgrid.svg?style=social&label=Follow)](https://twitter.com/sendgrid) +[![GitHub contributors](https://img.shields.io/github/contributors/sendgrid/sendgrid-go.svg)](https://github.com/sendgrid/sendgrid-go/graphs/contributors) +[![Open Source Helpers](https://www.codetriage.com/sendgrid/sendgrid-go/badges/users.svg)](https://www.codetriage.com/sendgrid/sendgrid-go) + +**NEW:** Subscribe to email [notifications](https://dx.sendgrid.com/newsletter/go) for releases and breaking changes. + +**This library allows you to quickly and easily use the SendGrid Web API v3 via Go.** + +Version 3.X.X of this library provides full support for all SendGrid [Web API v3](https://sendgrid.com/docs/API_Reference/Web_API_v3/index.html) endpoints, including the new [v3 /mail/send](https://sendgrid.com/blog/introducing-v3mailsend-sendgrids-new-mail-endpoint). + +This library represents the beginning of a new path for SendGrid. We want this library to be community driven and SendGrid led. We need your help to realize this goal. To help make sure we are building the right things in the right order, we ask that you create [issues](https://github.com/sendgrid/sendgrid-go/issues) and [pull requests](https://github.com/sendgrid/sendgrid-go/blob/master/CONTRIBUTING.md) or simply upvote or comment on existing issues or pull requests. + +Please browse the rest of this README for further detail. + +We appreciate your continued support, thank you! 
+ +# Table of Contents + +* [Installation](#installation) +* [Quick Start](#quick-start) +* [Processing Inbound Email](#inbound) +* [Usage](#usage) +* [Use Cases](#use-cases) +* [Announcements](#announcements) +* [Roadmap](#roadmap) +* [How to Contribute](#contribute) +* [Troubleshooting](#troubleshooting) +* [About](#about) +* [License](#license) + + +# Installation + +## Prerequisites + +- Go version 1.6 +- The SendGrid service, starting at the [free level](https://sendgrid.com/free?source=sendgrid-go) + +## Setup Environment Variables + +Update the development environment with your [SENDGRID_API_KEY](https://app.sendgrid.com/settings/api_keys), for example: + +```bash +echo "export SENDGRID_API_KEY='YOUR_API_KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +``` + +## Install Package + +`go get github.com/sendgrid/sendgrid-go` + +## Dependencies + +- [rest](https://github.com/sendgrid/rest) + +## Setup Environment Variables + +### Initial Setup + +```bash +cp .env_sample .env +``` + +### Environment Variable + +Update the development environment with your [SENDGRID_API_KEY](https://app.sendgrid.com/settings/api_keys), for example: + +```bash +echo "export SENDGRID_API_KEY='YOUR_API_KEY'" > sendgrid.env +echo "sendgrid.env" >> .gitignore +source ./sendgrid.env +``` + + +# Quick Start + +## Hello Email + +The following is the minimum needed code to send an email with the [/mail/send Helper](https://github.com/sendgrid/sendgrid-go/tree/master/helpers/mail) ([here](https://github.com/sendgrid/sendgrid-go/blob/master/examples/helpers/mail/example.go#L32) is a full example): + +### With Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + from := mail.NewEmail("Example User", "test@example.com") + subject := "Sending with SendGrid is Fun" + to := mail.NewEmail("Example User", "test@example.com") + plainTextContent := "and easy to do anywhere, even with Go" + htmlContent := "and easy to do anywhere, even with Go" + message := mail.NewSingleEmail(from, subject, to, plainTextContent, htmlContent) + client := sendgrid.NewSendClient(os.Getenv("SENDGRID_API_KEY")) + response, err := client.Send(message) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +The `NewEmail` constructor creates a [personalization object](https://sendgrid.com/docs/Classroom/Send/v3_Mail_Send/personalizations.html) for you. [Here](https://github.com/sendgrid/sendgrid-go/blob/master/examples/helpers/mail/example.go#L28) is an example of how to add to it. 
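+
+As a rough sketch of going beyond a single recipient, the `helpers/mail` package also provides personalization helpers (`NewPersonalization`, `AddTos`, `AddCCs`, `AddPersonalizations`); the recipient names and addresses below are placeholders, and `message` is the value built by `mail.NewSingleEmail` above:
+
+```go
+// Build an additional personalization and attach it to the message
+// before calling client.Send(message).
+p := mail.NewPersonalization()
+p.AddTos(
+    mail.NewEmail("Recipient One", "one@example.com"),
+    mail.NewEmail("Recipient Two", "two@example.com"),
+)
+p.AddCCs(mail.NewEmail("Copied User", "cc@example.com"))
+message.AddPersonalizations(p)
+```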
+ +### Without Mail Helper Class + +The following is the minimum needed code to send an email without the /mail/send Helper ([here](https://github.com/sendgrid/sendgrid-go/blob/master/examples/mail/mail.go#L47) is a full example): + +```go +package main + +import ( + "fmt" + "github.com/sendgrid/sendgrid-go" + "log" + "os" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(` { + "personalizations": [ + { + "to": [ + { + "email": "test@example.com" + } + ], + "subject": "Sending with SendGrid is Fun" + } + ], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/plain", + "value": "and easy to do anywhere, even with Go" + } + ] +}`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +## General v3 Web API Usage + +```go +package main + +import ( + "fmt" + "github.com/sendgrid/sendgrid-go" + "log" + "os" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/api_keys", "https://api.sendgrid.com") + request.Method = "GET" + + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + + +# Processing Inbound Email + +Please see [our helper](https://github.com/sendgrid/sendgrid-go/tree/master/helpers/inbound) for utilizing our Inbound Parse webhook. + + +# Usage + +- [SendGrid Docs](https://sendgrid.com/docs/API_Reference/index.html) +- [Library Usage Docs](https://github.com/sendgrid/sendgrid-go/tree/master/USAGE.md) +- [Example Code](https://github.com/sendgrid/sendgrid-go/tree/master/examples) +- [How-to: Migration from v2 to v3](https://sendgrid.com/docs/Classroom/Send/v3_Mail_Send/how_to_migrate_from_v2_to_v3_mail_send.html) +- [v3 Web API Mail Send Helper](https://github.com/sendgrid/sendgrid-go/tree/master/helpers/mail/README.md) + + +# Use Cases + +[Examples of common API use cases](https://github.com/sendgrid/sendgrid-go/blob/master/USE_CASES.md), such as how to send an email with a transactional template. + + +# Announcements + +Join an experienced and passionate team that focuses on making an impact. Opportunities abound to grow the product - and grow your career! Check out our [Software Engineer- Delivery role](http://grnh.se/mg6dr31) + +Please see our announcement regarding [breaking changes](https://github.com/sendgrid/sendgrid-go/issues/81). Your support is appreciated! + +All updates to this library are documented in our [CHANGELOG](https://github.com/sendgrid/sendgrid-go/blob/master/CHANGELOG.md) and [releases](https://github.com/sendgrid/sendgrid-go/releases). You may also subscribe to email [release notifications](https://dx.sendgrid.com/newsletter/go) for releases and breaking changes. + + +# Roadmap + +If you are interested in the future direction of this project, please take a look at our open [issues](https://github.com/sendgrid/sendgrid-go/issues) and [pull requests](https://github.com/sendgrid/sendgrid-go/pulls). We would love to hear your feedback. + + +# How to Contribute + +We encourage contribution to our libraries (you might even score some nifty swag), please see our [CONTRIBUTING](https://github.com/sendgrid/sendgrid-go/blob/master/CONTRIBUTING.md) guide for details. 
+ +Quick links: + +- [Feature Request](https://github.com/sendgrid/sendgrid-go/tree/master/CONTRIBUTING.md#feature-request) +- [Bug Reports](https://github.com/sendgrid/sendgrid-go/tree/master/CONTRIBUTING.md#submit-a-bug-report) +- [Sign the CLA to Create a Pull Request](https://github.com/sendgrid/sendgrid-go/tree/master/CONTRIBUTING.md#cla) +- [Improvements to the Codebase](https://github.com/sendgrid/sendgrid-go/tree/master/CONTRIBUTING.md#improvements-to-the-codebase) + + +# Troubleshooting + +Please see our [troubleshooting guide](https://github.com/sendgrid/sendgrid-go/blob/master/TROUBLESHOOTING.md) for common library issues. + + +# About + +sendgrid-go is guided and supported by the SendGrid [Developer Experience Team](mailto:dx@sendgrid.com). + +sendgrid-go is maintained and funded by SendGrid, Inc. The names and logos for sendgrid-go are trademarks of SendGrid, Inc. + +# License +[The MIT License (MIT)](LICENSE.txt) diff --git a/vendor/github.com/sendgrid/sendgrid-go/TROUBLESHOOTING.md b/vendor/github.com/sendgrid/sendgrid-go/TROUBLESHOOTING.md new file mode 100644 index 0000000..585dc8a --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/TROUBLESHOOTING.md @@ -0,0 +1,108 @@ +If you have a non-library SendGrid issue, please contact our [support team](https://support.sendgrid.com). + +If you can't find a solution below, please open an [issue](https://github.com/sendgrid/sendgrid-go/issues). + + +## Table of Contents + +* [Migrating from v2 to v3](#migrating) +* [Continue Using v2](#v2) +* [Testing v3 /mail/send Calls Directly](#testing) +* [Error Messages](#error) +* [Versions](#versions) +* [Environment Variables and Your SendGrid API Key](#environment) +* [Viewing the Request Body](#request-body) + + +## Migrating from v2 to v3 + +Please review [our guide](https://sendgrid.com/docs/Classroom/Send/v3_Mail_Send/how_to_migrate_from_v2_to_v3_mail_send.html) on how to migrate from v2 to v3. + + +## Continue Using v2 + +[Here](https://github.com/sendgrid/sendgrid-go/tree/0bf6332788d0230b7da84a1ae68d7531073200e1) is the last working version with v2 support. + +Download: + +Click the "Clone or download" green button in [GitHub](https://github.com/sendgrid/sendgrid-go/tree/0bf6332788d0230b7da84a1ae68d7531073200e1) and choose download. + + +## Testing v3 /mail/send Calls Directly + +[Here](https://sendgrid.com/docs/Classroom/Send/v3_Mail_Send/curl_examples.html) are some cURL examples for common use cases. + + +## Error Messages + +An error is returned if caused by client policy (such as CheckRedirect), or failure to speak HTTP (such as a network connectivity problem). 
+ +To read the error message returned by SendGrid's API: + +```go +func main() { + from := mail.NewEmail("Example User", "test@example.com") + subject := "Hello World from the SendGrid Go Library" + to := mail.NewEmail("Example User", "test@example.com") + content := mail.NewContent("text/plain", "some text here") + m := mail.NewV3MailInit(from, subject, to, content) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KE"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +__CAUTION__: A non-2xx status code doesn't cause an error on sendgrid.API and the application has to verify the response: + +```golang +resp, err := sendgrid.API(request) +if err != nil { + return err +} +if resp.StatusCode >= 400 { + // something goes wrong and you have to handle (e.g. returning an error to the user or logging the problem) + log.Printf("api response: HTTP %d: %s", resp.StatusCode, resp.Body) + // OR + // return fmt.Errorf("api response: HTTP %d: %s", resp.StatusCode, resp.Body) +} +``` + + +## Versions + +We follow the MAJOR.MINOR.PATCH versioning scheme as described by [SemVer.org](http://semver.org). Therefore, we recommend that you always pin (or vendor) the particular version you are working with to your code and never auto-update to the latest version. Especially when there is a MAJOR point release, since that is guaranteed to be a breaking change. Changes are documented in the [CHANGELOG](https://github.com/sendgrid/sendgrid-go/blob/master/CHANGELOG.md) and [releases](https://github.com/sendgrid/sendgrid-go/releases) section. + + +## Environment Variables and Your SendGrid API Key + +All of our examples assume you are using [environment variables](https://github.com/sendgrid/sendgrid-go#setup-environment-variables) to hold your SendGrid API key. + +If you choose to add your SendGrid API key directly (not recommended): + +`os.Getenv("SENDGRID_API_KEY")` + +becomes + +`"SENDGRID_API_KEY"` + +In the first case SENDGRID_API_KEY is in reference to the name of the environment variable, while the second case references the actual SendGrid API Key. + + +## Viewing the Request Body + +When debugging or testing, it may be useful to examine the raw request body to compare against the [documented format](https://sendgrid.com/docs/API_Reference/api_v3.html). + +You can do this right before you call `response, err := client.Send(message)` like so: + +```go +fmt.Println(string(mail.GetRequestBody(message))) +``` \ No newline at end of file diff --git a/vendor/github.com/sendgrid/sendgrid-go/USAGE.md b/vendor/github.com/sendgrid/sendgrid-go/USAGE.md new file mode 100644 index 0000000..b32913b --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/USAGE.md @@ -0,0 +1,6514 @@ +This documentation is based on our [OAI specification](https://github.com/sendgrid/sendgrid-oai). 
+ +# INITIALIZATION + +```go +package main + +import ( + "fmt" + "github.com/sendgrid/sendgrid-go" + "log" + "os" +) + +apiKey := os.Getenv("YOUR_SENDGRID_APIKEY") +host := "https://api.sendgrid.com" +``` + +# Table of Contents + +* [ACCESS SETTINGS](#access-settings) +* [ALERTS](#alerts) +* [API KEYS](#api-keys) +* [ASM](#asm) +* [BROWSERS](#browsers) +* [CAMPAIGNS](#campaigns) +* [CATEGORIES](#categories) +* [CLIENTS](#clients) +* [CONTACTDB](#contactdb) +* [DEVICES](#devices) +* [GEO](#geo) +* [IPS](#ips) +* [MAIL](#mail) +* [MAIL SETTINGS](#mail-settings) +* [MAILBOX PROVIDERS](#mailbox-providers) +* [PARTNER SETTINGS](#partner-settings) +* [SCOPES](#scopes) +* [SENDERS](#senders) +* [STATS](#stats) +* [SUBUSERS](#subusers) +* [SUPPRESSION](#suppression) +* [TEMPLATES](#templates) +* [TRACKING SETTINGS](#tracking-settings) +* [USER](#user) +* [WHITELABEL](#whitelabel) + + + +# ACCESS SETTINGS + +## Retrieve all recent access attempts + +**This endpoint allows you to retrieve a list of all of the IP addresses that recently attempted to access your account either through the User Interface or the API.** + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). + +### GET /access_settings/activity + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/activity", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add one or more IPs to the whitelist + +**This endpoint allows you to add one or more IP addresses to your IP whitelist.** + +When adding an IP to your whitelist, include the IP address in an array. You can whitelist one IP at a time, or you can whitelist multiple IPs at once. + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). 
+ +### POST /access_settings/whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist", host) +request.Method = "POST" +request.Body = []byte(` { + "ips": [ + { + "ip": "192.168.1.1" + }, + { + "ip": "192.*.*.*" + }, + { + "ip": "192.168.1.3/32" + } + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a list of currently whitelisted IPs + +**This endpoint allows you to retrieve a list of IP addresses that are currently whitelisted.** + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). + +### GET /access_settings/whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove one or more IPs from the whitelist + +**This endpoint allows you to remove one or more IPs from your IP whitelist.** + +You can remove one IP at a time, or you can remove multiple IP addresses. + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). + +### DELETE /access_settings/whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist", host) +request.Method = "DELETE" +request.Body = []byte(` { + "ids": [ + 1, + 2, + 3 + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific whitelisted IP + +**This endpoint allows you to retrieve a specific IP address that has been whitelisted.** + +You must include the ID for the specific IP address you want to retrieve in your call. + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). 
+ +### GET /access_settings/whitelist/{rule_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist/{rule_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove a specific IP from the whitelist + +**This endpoint allows you to remove a specific IP address from your IP whitelist.** + +When removing a specific IP address from your whitelist, you must include the ID in your call. + +IP Access Management allows you to control which IP addresses can be used to access your account, either through the User Interface or the API. There is no limit to the number of IP addresses that you can add to your whitelist. It is possible to remove your own IP address from the whitelist, thus preventing yourself from accessing your account. + +For more information, please see our [User Guide](http://sendgrid.com/docs/User_Guide/Settings/ip_access_management.html). + +### DELETE /access_settings/whitelist/{rule_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/access_settings/whitelist/{rule_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# ALERTS + +## Create a new Alert + +**This endpoint allows you to create a new alert.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. +* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". + +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). + +### POST /alerts + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts", host) +request.Method = "POST" +request.Body = []byte(` { + "email_to": "example@example.com", + "frequency": "daily", + "type": "stats_notification" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all alerts + +**This endpoint allows you to retrieve all of your alerts.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. +* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". + +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). + +### GET /alerts + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update an alert + +**This endpoint allows you to update an alert.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. 
+* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". + +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). + +### PATCH /alerts/{alert_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts/{alert_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "email_to": "example@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific alert + +**This endpoint allows you to retrieve a specific alert.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. +* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". + +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). + +### GET /alerts/{alert_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts/{alert_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete an alert + +**This endpoint allows you to delete an alert.** + +Alerts allow you to specify an email address to receive notifications regarding your email usage or statistics. +* Usage alerts allow you to set the threshold at which an alert will be sent. +* Stats notifications allow you to set how frequently you would like to receive email statistics reports. For example, "daily", "weekly", or "monthly". + +For more information about alerts, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/alerts.html). + +### DELETE /alerts/{alert_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/alerts/{alert_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# API KEYS + +## Create API keys + +**This endpoint allows you to create a new random API Key for the user.** + +A JSON request body containing a "name" property is required. If number of maximum keys is reached, HTTP 403 will be returned. + +There is a limit of 100 API Keys on your account. + +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +See the [API Key Permissions List](https://sendgrid.com/docs/API_Reference/Web_API_v3/API_Keys/api_key_permissions_list.html) for a list of all available scopes. 
+ +### POST /api_keys + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "My API Key", + "sample": "data", + "scopes": [ + "mail.send", + "alerts.create", + "alerts.read" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all API Keys belonging to the authenticated user + +**This endpoint allows you to retrieve all API Keys that belong to the authenticated user.** + +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +### GET /api_keys + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update the name & scopes of an API Key + +**This endpoint allows you to update the name and scopes of a given API key.** + +A JSON request body with a "name" property is required. +Most provide the list of all the scopes an api key should have. + +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + + +### PUT /api_keys/{api_key_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys/{api_key_id}", host) +request.Method = "PUT" +request.Body = []byte(` { + "name": "A New Hope", + "scopes": [ + "user.profile.read", + "user.profile.update" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update API keys + +**This endpoint allows you to update the name of an existing API Key.** + +A JSON request body with a "name" property is required. + +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +## URI Parameters + +| URI Parameter | Type | Required? | Description | +|---|---|---|---| +|api_key_id |string | required | The ID of the API Key you are updating.| + +### PATCH /api_keys/{api_key_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys/{api_key_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "name": "A New Hope" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve an existing API Key + +**This endpoint allows you to retrieve a single api key.** + +If the API Key ID does not exist an HTTP 404 will be returned. 
+ +### GET /api_keys/{api_key_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys/{api_key_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete API keys + +**This endpoint allows you to revoke an existing API Key** + +Authentications using this API Key will fail after this request is made, with some small propagation delay.If the API Key ID does not exist an HTTP 404 will be returned. + +The API Keys feature allows customers to be able to generate an API Key credential which can be used for authentication with the SendGrid v3 Web API or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +## URI Parameters + +| URI Parameter | Type | Required? | Description | +|---|---|---|---| +|api_key_id |string | required | The ID of the API Key you are deleting.| + +### DELETE /api_keys/{api_key_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/api_keys/{api_key_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# ASM + +## Create a new suppression group + +**This endpoint allows you to create a new suppression group.** + +Suppression groups, or unsubscribe groups, are specific types or categories of email that you would like your recipients to be able to unsubscribe from. For example: Daily Newsletters, Invoices, System Alerts. + +The **name** and **description** of the unsubscribe group will be visible by recipients when they are managing their subscriptions. + +Each user can create up to 25 different suppression groups. + +### POST /asm/groups + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups", host) +request.Method = "POST" +request.Body = []byte(` { + "description": "Suggestions for products our users might like.", + "is_default": true, + "name": "Product Suggestions" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve information about multiple suppression groups + +**This endpoint allows you to retrieve information about multiple suppression groups.** + +This endpoint will return information for each group ID that you include in your request. To add a group ID to your request, simply append `&id=` followed by the group ID. + +Suppressions are a list of email addresses that will not receive content sent under a given [group](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). + +Suppression groups, or [unsubscribe groups](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html), allow you to label a category of content that you regularly send. This gives your recipients the ability to opt out of a specific set of your email. For example, you might define a group for your transactional email, and one for your marketing email so that your users can continue receiving your transactional email without having to receive your marketing content. 
+ +### GET /asm/groups + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a suppression group. + +**This endpoint allows you to update or change a suppression group.** + +Suppression groups, or unsubscribe groups, are specific types or categories of email that you would like your recipients to be able to unsubscribe from. For example: Daily Newsletters, Invoices, System Alerts. + +The **name** and **description** of the unsubscribe group will be visible by recipients when they are managing their subscriptions. + +Each user can create up to 25 different suppression groups. + +### PATCH /asm/groups/{group_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "description": "Suggestions for items our users might like.", + "id": 103, + "name": "Item Suggestions" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get information on a single suppression group. + +**This endpoint allows you to retrieve a single suppression group.** + +Suppression groups, or unsubscribe groups, are specific types or categories of email that you would like your recipients to be able to unsubscribe from. For example: Daily Newsletters, Invoices, System Alerts. + +The **name** and **description** of the unsubscribe group will be visible by recipients when they are managing their subscriptions. + +Each user can create up to 25 different suppression groups. + +### GET /asm/groups/{group_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a suppression group. + +**This endpoint allows you to delete a suppression group.** + +You can only delete groups that have not been attached to sent mail in the last 60 days. If a recipient uses the "one-click unsubscribe" option on an email associated with a deleted group, that recipient will be added to the global suppression list. + +Suppression groups, or unsubscribe groups, are specific types or categories of email that you would like your recipients to be able to unsubscribe from. For example: Daily Newsletters, Invoices, System Alerts. + +The **name** and **description** of the unsubscribe group will be visible by recipients when they are managing their subscriptions. + +Each user can create up to 25 different suppression groups. 
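The `{group_id}` in the paths above and below is a literal placeholder; in your own code you would interpolate a real group ID into the endpoint string before building the request. A short sketch (the `groupID` value is hypothetical, e.g. the ID returned when the group was created):

```go
groupID := 103 // hypothetical suppression group ID
endpoint := fmt.Sprintf("/v3/asm/groups/%d", groupID)
request := sendgrid.GetRequest(apiKey, endpoint, host)
request.Method = "DELETE"
response, err := sendgrid.API(request)
if err != nil {
  log.Println(err)
} else {
  fmt.Println(response.StatusCode)
}
```

The same substitution applies to every `{...}` placeholder in this guide.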
+ +### DELETE /asm/groups/{group_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add suppressions to a suppression group + +**This endpoint allows you to add email addresses to an unsubscribe group.** + +If you attempt to add suppressions to a group that has been deleted or does not exist, the suppressions will be added to the global suppressions list. + +Suppressions are recipient email addresses that are added to [unsubscribe groups](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). Once a recipient's address is on the suppressions list for an unsubscribe group, they will not receive any emails that are tagged with that unsubscribe group. + +### POST /asm/groups/{group_id}/suppressions + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}/suppressions", host) +request.Method = "POST" +request.Body = []byte(` { + "recipient_emails": [ + "test1@example.com", + "test2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all suppressions for a suppression group + +**This endpoint allows you to retrieve all suppressed email addresses belonging to the given group.** + +Suppressions are recipient email addresses that are added to [unsubscribe groups](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). Once a recipient's address is on the suppressions list for an unsubscribe group, they will not receive any emails that are tagged with that unsubscribe group. + +### GET /asm/groups/{group_id}/suppressions + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}/suppressions", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Search for suppressions within a group + +**This endpoint allows you to search a suppression group for multiple suppressions.** + +When given a list of email addresses and a group ID, this endpoint will return only the email addresses that have been unsubscribed from the given group. + +Suppressions are a list of email addresses that will not receive content sent under a given [group](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). 
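`response.Body` is returned as a plain string throughout this guide, so to work with the matched addresses programmatically you can decode it with `encoding/json`. A sketch to run after the search request shown below, assuming the response carries the matches in a `recipient_emails` array:

```go
// Decode the JSON response body from the suppression search.
var result struct {
  RecipientEmails []string `json:"recipient_emails"`
}
if err := json.Unmarshal([]byte(response.Body), &result); err != nil {
  log.Println(err)
} else {
  for _, email := range result.RecipientEmails {
    fmt.Println("suppressed in this group:", email)
  }
}
```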
+ +### POST /asm/groups/{group_id}/suppressions/search + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}/suppressions/search", host) +request.Method = "POST" +request.Body = []byte(` { + "recipient_emails": [ + "exists1@example.com", + "exists2@example.com", + "doesnotexists@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a suppression from a suppression group + +**This endpoint allows you to remove a suppressed email address from the given suppression group.** + +Suppressions are recipient email addresses that are added to [unsubscribe groups](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). Once a recipient's address is on the suppressions list for an unsubscribe group, they will not receive any emails that are tagged with that unsubscribe group. + +### DELETE /asm/groups/{group_id}/suppressions/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/groups/{group_id}/suppressions/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all suppressions + +**This endpoint allows you to retrieve a list of all suppressions.** + +Suppressions are a list of email addresses that will not receive content sent under a given [group](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). + +### GET /asm/suppressions + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add recipient addresses to the global suppression group. + +**This endpoint allows you to add one or more email addresses to the global suppressions group.** + +A global suppression (or global unsubscribe) is an email address of a recipient who does not want to receive any of your messages. A globally suppressed recipient will be removed from any email you send. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/global_unsubscribes.html). + +### POST /asm/suppressions/global + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions/global", host) +request.Method = "POST" +request.Body = []byte(` { + "recipient_emails": [ + "test1@example.com", + "test2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Global Suppression + +**This endpoint allows you to retrieve a global suppression. You can also use this endpoint to confirm if an email address is already globally suppressed.** + +If the email address you include in the URL path parameter `{email}` is already globally suppressed, the response will include that email address. If the address you enter for `{email}` is not globally suppressed, an empty JSON object `{}` will be returned. + +A global suppression (or global unsubscribe) is an email address of a recipient who does not want to receive any of your messages. 
A globally suppressed recipient will be removed from any email you send. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/global_unsubscribes.html). + +### GET /asm/suppressions/global/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions/global/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Global Suppression + +**This endpoint allows you to remove an email address from the global suppressions group.** + +A global suppression (or global unsubscribe) is an email address of a recipient who does not want to receive any of your messages. A globally suppressed recipient will be removed from any email you send. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/global_unsubscribes.html). + +### DELETE /asm/suppressions/global/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions/global/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all suppression groups for an email address + +**This endpoint returns the list of all groups that the given email address has been unsubscribed from.** + +Suppressions are a list of email addresses that will not receive content sent under a given [group](https://sendgrid.com/docs/API_Reference/Web_API_v3/Suppression_Management/groups.html). + +### GET /asm/suppressions/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/asm/suppressions/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# BROWSERS + +## Retrieve email statistics by browser. + +**This endpoint allows you to retrieve your email statistics segmented by browser type.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /browsers/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/browsers/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["browsers"] = "test_string" +queryParams["limit"] = "test_string" +queryParams["offset"] = "test_string" +queryParams["start_date"] = "2016-01-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# CAMPAIGNS + +## Create a Campaign + +**This endpoint allows you to create a campaign.** + +Our Marketing Campaigns API lets you create, manage, send, and schedule campaigns. 
+ +Note: In order to send or schedule the campaign, you will be required to provide a subject, sender ID, content (we suggest both html and plain text), and at least one list or segment ID. This information is not required when you create a campaign. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### POST /campaigns + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns", host) +request.Method = "POST" +request.Body = []byte(` { + "categories": [ + "spring line" + ], + "custom_unsubscribe_url": "", + "html_content": "

Check out our spring line!

", + "ip_pool": "marketing", + "list_ids": [ + 110, + 124 + ], + "plain_content": "Check out our spring line!", + "segment_ids": [ + 110 + ], + "sender_id": 124451, + "subject": "New Products for Spring!", + "suppression_group_id": 42, + "title": "March Newsletter" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all Campaigns + +**This endpoint allows you to retrieve a list of all of your campaigns.** + +Returns campaigns in reverse order they were created (newest first). + +Returns an empty array if no campaigns exist. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### GET /campaigns + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a Campaign + +Update a campaign. This is especially useful if you only set up the campaign using POST /campaigns, but didn't set many of the parameters. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### PATCH /campaigns/{campaign_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "categories": [ + "summer line" + ], + "html_content": "

Check out our summer line!

", + "plain_content": "Check out our summer line!", + "subject": "New Products for Summer!", + "title": "May Newsletter" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a single campaign + +**This endpoint allows you to retrieve a specific campaign.** + +Our Marketing Campaigns API lets you create, manage, send, and schedule campaigns. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### GET /campaigns/{campaign_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Campaign + +**This endpoint allows you to delete a specific campaign.** + +Our Marketing Campaigns API lets you create, manage, send, and schedule campaigns. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### DELETE /campaigns/{campaign_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a Scheduled Campaign + +**This endpoint allows to you change the scheduled time and date for a campaign to be sent.** + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### PATCH /campaigns/{campaign_id}/schedules + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules", host) +request.Method = "PATCH" +request.Body = []byte(` { + "send_at": 1489451436 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Schedule a Campaign + +**This endpoint allows you to schedule a specific date and time for your campaign to be sent.** + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### POST /campaigns/{campaign_id}/schedules + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules", host) +request.Method = "POST" +request.Body = []byte(` { + "send_at": 1489771528 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## View Scheduled Time of a Campaign + +**This endpoint allows you to retrieve the date and time that the given campaign has been scheduled to be sent.** + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### GET /campaigns/{campaign_id}/schedules + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + 
fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Unschedule a Scheduled Campaign + +**This endpoint allows you to unschedule a campaign that has already been scheduled to be sent.** + +A successful unschedule will return a 204. +If the specified campaign is in the process of being sent, the only option is to cancel (a different method). + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### DELETE /campaigns/{campaign_id}/schedules + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Send a Campaign + +**This endpoint allows you to immediately send a campaign at the time you make the API call.** + +Normally a POST would have a request body, but since this endpoint is telling us to send a resource that is already created, a request body is not needed. + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### POST /campaigns/{campaign_id}/schedules/now + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules/now", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Send a Test Campaign + +**This endpoint allows you to send a test campaign.** + +To send to multiple addresses, use an array for the JSON "to" value ["one@address","two@address"] + +For more information: + +* [User Guide > Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) + +### POST /campaigns/{campaign_id}/schedules/test + +```go +request := sendgrid.GetRequest(apiKey, "/v3/campaigns/{campaign_id}/schedules/test", host) +request.Method = "POST" +request.Body = []byte(` { + "to": "your.email@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# CATEGORIES + +## Retrieve all categories + +**This endpoint allows you to retrieve a list of all of your categories.** + +Categories can help organize your email analytics by enabling you to tag emails by type or broad topic. You can define your own custom categories. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/categories.html). + +### GET /categories + +```go +request := sendgrid.GetRequest(apiKey, "/v3/categories", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["category"] = "test_string" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Email Statistics for Categories + +**This endpoint allows you to retrieve all of your email statistics for each of your categories.** + +If you do not define any query parameters, this endpoint will return a sum for each category in groups of 10. 
+ +Categories allow you to group your emails together according to broad topics that you define. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/categories.html). + +### GET /categories/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/categories/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["categories"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve sums of email stats for each category [Needs: Stats object defined, has category ID?] + +**This endpoint allows you to retrieve the total sum of each email statistic for every category over the given date range.** + +If you do not define any query parameters, this endpoint will return a sum for each category in groups of 10. + +Categories allow you to group your emails together according to broad topics that you define. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/categories.html). + +### GET /categories/stats/sums + +```go +request := sendgrid.GetRequest(apiKey, "/v3/categories/stats/sums", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["sort_by_metric"] = "test_string" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["sort_by_direction"] = "asc" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# CLIENTS + +## Retrieve email statistics by client type. + +**This endpoint allows you to retrieve your email statistics segmented by client type.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /clients/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/clients/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["aggregated_by"] = "day" +queryParams["start_date"] = "2016-01-01" +queryParams["end_date"] = "2016-04-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve stats by a specific client type. + +**This endpoint allows you to retrieve your email statistics segmented by a specific client type.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. 
+ +## Available Client Types +- phone +- tablet +- webmail +- desktop + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /clients/{client_type}/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/clients/{client_type}/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["aggregated_by"] = "day" +queryParams["start_date"] = "2016-01-01" +queryParams["end_date"] = "2016-04-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# CONTACTDB + +## Create a Custom Field + +**This endpoint allows you to create a custom field.** + +The contactdb is a database of your contacts for [SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### POST /contactdb/custom_fields + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/custom_fields", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "pet", + "type": "text" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all custom fields + +**This endpoint allows you to retrieve all custom fields.** + +The contactdb is a database of your contacts for [SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### GET /contactdb/custom_fields + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/custom_fields", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Custom Field + +**This endpoint allows you to retrieve a custom field by ID.** + +The contactdb is a database of your contacts for [SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### GET /contactdb/custom_fields/{custom_field_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/custom_fields/{custom_field_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Custom Field + +**This endpoint allows you to delete a custom field by ID.** + +The contactdb is a database of your contacts for [SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). 
+ +### DELETE /contactdb/custom_fields/{custom_field_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/custom_fields/{custom_field_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a List + +**This endpoint allows you to create a list for your recipients.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### POST /contactdb/lists + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "your list name" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all lists + +**This endpoint allows you to retrieve all of your recipient lists. If you don't have any lists, an empty array will be returned.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/lists + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete Multiple lists + +**This endpoint allows you to delete multiple recipient lists.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### DELETE /contactdb/lists + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists", host) +request.Method = "DELETE" +request.Body = []byte(` [ + 1, + 2, + 3, + 4 +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a List + +**This endpoint allows you to update the name of one of your recipient lists.** + + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### PATCH /contactdb/lists/{list_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "name": "newlistname" +}`) +queryParams := make(map[string]string) +queryParams["list_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a single list + +This endpoint allows you to retrieve a single recipient list. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. 
+ +### GET /contactdb/lists/{list_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["list_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a List + +**This endpoint allows you to delete a specific recipient list with the given ID.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### DELETE /contactdb/lists/{list_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["delete_contacts"] = "true" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add Multiple Recipients to a List + +**This endpoint allows you to add multiple recipients to a list.** + +Adds existing recipients to a list, passing in the recipient IDs to add. Recipient IDs should be passed exactly as they are returned from recipient endpoints. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### POST /contactdb/lists/{list_id}/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}/recipients", host) +request.Method = "POST" +request.Body = []byte(` [ + "recipient_id1", + "recipient_id2" +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all recipients on a List + +**This endpoint allows you to retrieve all recipients on the list with the given ID.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/lists/{list_id}/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}/recipients", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["page"] = "1" +queryParams["page_size"] = "1" +queryParams["list_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add a Single Recipient to a List + +**This endpoint allows you to add a single recipient to a list.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. 
+ +### POST /contactdb/lists/{list_id}/recipients/{recipient_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}/recipients/{recipient_id}", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Single Recipient from a Single List + +**This endpoint allows you to delete a single recipient from a list.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### DELETE /contactdb/lists/{list_id}/recipients/{recipient_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/lists/{list_id}/recipients/{recipient_id}", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["recipient_id"] = "1" +queryParams["list_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Recipient + +**This endpoint allows you to update one or more recipients.** + +The body of an API call to this endpoint must include an array of one or more recipient objects. + +It is of note that you can add custom field data as parameters on recipient objects. We have provided an example using some of the default custom fields SendGrid provides. + +The contactdb is a database of your contacts for [SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### PATCH /contactdb/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host) +request.Method = "PATCH" +request.Body = []byte(` [ + { + "email": "jones@example.com", + "first_name": "Guy", + "last_name": "Jones" + } +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add recipients + +**This endpoint allows you to add a Marketing Campaigns recipient.** + +It is of note that you can add custom field data as a parameter on this endpoint. We have provided an example using some of the default custom fields SendGrid provides. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### POST /contactdb/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host) +request.Method = "POST" +request.Body = []byte(` [ + { + "age": 25, + "email": "example@example.com", + "first_name": "", + "last_name": "User" + }, + { + "age": 25, + "email": "example2@example.com", + "first_name": "Example", + "last_name": "User" + } +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve recipients + +**This endpoint allows you to retrieve all of your Marketing Campaigns recipients.** + +Batch deletion of a page makes it possible to receive an empty page of recipients before reaching the end of +the list of recipients. To avoid this issue; iterate over pages until a 404 is retrieved. 
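A sketch of that pagination pattern, reusing the request shape from the example below and stopping when the API answers with a 404 (assumes `net/http` and `strconv` are imported):

```go
// Walk the recipient pages until a 404 indicates there are no more pages.
for page := 1; ; page++ {
  request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host)
  request.Method = "GET"
  request.QueryParams = map[string]string{
    "page":      strconv.Itoa(page),
    "page_size": "100",
  }
  response, err := sendgrid.API(request)
  if err != nil {
    log.Println(err)
    break
  }
  if response.StatusCode == http.StatusNotFound {
    break // past the last page
  }
  fmt.Println(response.Body) // process this page of recipients
}
```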
+ +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["page"] = "1" +queryParams["page_size"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete Recipient + +**This endpoint allows you to deletes one or more recipients.** + +The body of an API call to this endpoint must include an array of recipient IDs of the recipients you want to delete. + +The contactdb is a database of your contacts for [SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### DELETE /contactdb/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients", host) +request.Method = "DELETE" +request.Body = []byte(` [ + "recipient_id1", + "recipient_id2" +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve the count of billable recipients + +**This endpoint allows you to retrieve the number of Marketing Campaigns recipients that you will be billed for.** + +You are billed for marketing campaigns based on the highest number of recipients you have had in your account at one time. This endpoint will allow you to know the current billable count value. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/recipients/billable_count + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/billable_count", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Count of Recipients + +**This endpoint allows you to retrieve the total number of Marketing Campaigns recipients.** + +The contactdb is a database of your contacts for [SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### GET /contactdb/recipients/count + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/count", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve recipients matching search criteria + +**This endpoint allows you to perform a search on all of your Marketing Campaigns recipients.** + +field_name: + +* is a variable that is substituted for your actual custom field name from your recipient. +* Text fields must be url-encoded. Date fields are searchable only by unix timestamp (e.g. 2/2/2015 becomes 1422835200) +* If field_name is a 'reserved' date field, such as created_at or updated_at, the system will internally convert +your epoch time to a date range encompassing the entire day. 
For example, an epoch time of 1422835600 converts to +Mon, 02 Feb 2015 00:06:40 GMT, but internally the system will search from Mon, 02 Feb 2015 00:00:00 GMT through +Mon, 02 Feb 2015 23:59:59 GMT. + +The contactdb is a database of your contacts for [SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). + +### GET /contactdb/recipients/search + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/search", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["{field_name}"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a single recipient + +**This endpoint allows you to retrieve a single recipient by ID from your contact database.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/recipients/{recipient_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/{recipient_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Recipient + +**This endpoint allows you to delete a single recipient with the given ID from your contact database.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### DELETE /contactdb/recipients/{recipient_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/{recipient_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve the lists that a recipient is on + +**This endpoint allows you to retrieve the lists that a given recipient belongs to.** + +Each recipient can be on many lists. This endpoint gives you all of the lists that any one recipient has been added to. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +### GET /contactdb/recipients/{recipient_id}/lists + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/recipients/{recipient_id}/lists", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve reserved fields + +**This endpoint allows you to list all fields that are reserved and can't be used for custom field names.** + +The contactdb is a database of your contacts for [SendGrid Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html). 
+ +### GET /contactdb/reserved_fields + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/reserved_fields", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a Segment + +**This endpoint allows you to create a segment.** + +All recipients in your contactdb will be added or removed automatically depending on whether they match the criteria for this segment. + +List Id: + +* Send this to segment from an existing list +* Don't send this in order to segment from your entire contactdb. + +Valid operators for create and update depend on the type of the field you are segmenting: + +* **Dates:** "eq", "ne", "lt" (before), "gt" (after) +* **Text:** "contains", "eq" (is - matches the full field), "ne" (is not - matches any field where the entire field is not the condition value) +* **Numbers:** "eq", "lt", "gt" +* **Email Clicks and Opens:** "eq" (opened), "ne" (not opened) + +Segment conditions using "eq" or "ne" for email clicks and opens should provide a "field" of either *clicks.campaign_identifier* or *opens.campaign_identifier*. The condition value should be a string containing the id of a completed campaign. + +Segments may contain multiple conditions, joined by an "and" or "or" in the "and_or" field. The first condition in the conditions list must have an empty "and_or", and subsequent conditions must all specify an "and_or". + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### POST /contactdb/segments + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments", host) +request.Method = "POST" +request.Body = []byte(` { + "conditions": [ + { + "and_or": "", + "field": "last_name", + "operator": "eq", + "value": "Miller" + }, + { + "and_or": "and", + "field": "last_clicked", + "operator": "gt", + "value": "01/02/2015" + }, + { + "and_or": "or", + "field": "clicks.campaign_identifier", + "operator": "eq", + "value": "513" + } + ], + "list_id": 4, + "name": "Last Name Miller" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all segments + +**This endpoint allows you to retrieve all of your segments.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### GET /contactdb/segments + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a segment + +**This endpoint allows you to update a segment.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. 
+ +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### PATCH /contactdb/segments/{segment_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments/{segment_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "conditions": [ + { + "and_or": "", + "field": "last_name", + "operator": "eq", + "value": "Miller" + } + ], + "list_id": 5, + "name": "The Millers" +}`) +queryParams := make(map[string]string) +queryParams["segment_id"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a segment + +**This endpoint allows you to retrieve a single segment with the given ID.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### GET /contactdb/segments/{segment_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments/{segment_id}", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["segment_id"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a segment + +**This endpoint allows you to delete a segment from your recipients database.** + +You also have the option to delete all the contacts from your Marketing Campaigns recipient database who were in this segment. + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). + +### DELETE /contactdb/segments/{segment_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments/{segment_id}", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["delete_contacts"] = "true" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve recipients on a segment + +**This endpoint allows you to retrieve all of the recipients in a segment with the given ID.** + +The Contacts API helps you manage your [Marketing Campaigns](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/index.html) recipients. + +For more information about segments in Marketing Campaigns, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/lists.html#-Create-a-Segment). 
+ +### GET /contactdb/segments/{segment_id}/recipients + +```go +request := sendgrid.GetRequest(apiKey, "/v3/contactdb/segments/{segment_id}/recipients", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["page"] = "1" +queryParams["page_size"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# DEVICES + +## Retrieve email statistics by device type. + +**This endpoint allows you to retrieve your email statistics segmented by the device type.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +## Available Device Types +| **Device** | **Description** | **Example** | +|---|---|---| +| Desktop | Email software on desktop computer. | I.E., Outlook, Sparrow, or Apple Mail. | +| Webmail | A web-based email client. | I.E., Yahoo, Google, AOL, or Outlook.com. | +| Phone | A smart phone. | iPhone, Android, Blackberry, etc. +| Tablet | A tablet computer. | iPad, android based tablet, etc. | +| Other | An unrecognized device. | + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /devices/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/devices/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["end_date"] = "2016-04-01" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# GEO + +## Retrieve email statistics by country and state/province. + +**This endpoint allows you to retrieve your email statistics segmented by country and state/province.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). 
+ +### GET /geo/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/geo/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["country"] = "US" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# IPS + +## Retrieve all IP addresses + +**This endpoint allows you to retrieve a list of all assigned and unassigned IPs.** + +Response includes warm up status, pools, assigned subusers, and whitelabel info. The start_date field corresponds to when warmup started for that IP. + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### GET /ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["subuser"] = "test_string" +queryParams["ip"] = "test_string" +queryParams["limit"] = "1" +queryParams["exclude_whitelabels"] = "true" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all assigned IPs + +**This endpoint allows you to retrieve only assigned IP addresses.** + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### GET /ips/assigned + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/assigned", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create an IP pool. + +**This endpoint allows you to create an IP pool.** + +**Each user can create up to 10 different IP pools.** + +IP Pools allow you to group your dedicated SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with whitelabeled IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. + +### POST /ips/pools + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "marketing" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all IP pools. + +**This endpoint allows you to retrieve all of your IP pools.** + +IP Pools allow you to group your dedicated SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. 
When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with whitelabeled IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. + +### GET /ips/pools + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update an IP pools name. + +**This endpoint allows you to update the name of an IP pool.** + +IP Pools allow you to group your dedicated SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with whitelabeled IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. + +### PUT /ips/pools/{pool_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}", host) +request.Method = "PUT" +request.Body = []byte(` { + "name": "new_pool_name" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all IPs in a specified pool. + +**This endpoint allows you to list all of the IP addresses that are in a specific IP pool.** + +IP Pools allow you to group your dedicated SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with whitelabeled IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. + +### GET /ips/pools/{pool_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete an IP pool. + +**This endpoint allows you to delete an IP pool.** + +IP Pools allow you to group your dedicated SendGrid IP addresses together. For example, you could create separate pools for your transactional and marketing email. When sending marketing emails, specify that you want to use the marketing IP pool. This allows you to maintain separate reputations for your different email traffic. + +IP pools can only be used with whitelabeled IP addresses. + +If an IP pool is NOT specified for an email, it will use any IP available, including ones in pools. 
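For context, a pool is selected at send time by naming it in the `ip_pool_name` field of a v3 mail/send request (the full request body appears later in the MAIL section). A minimal, hedged sketch that reuses the `apiKey`/`host` convention of the other snippets; the addresses and pool name are placeholders:

```go
request := sendgrid.GetRequest(apiKey, "/v3/mail/send", host)
request.Method = "POST"
// Route this send through the dedicated IPs grouped in the "marketing" pool.
request.Body = []byte(` {
  "personalizations": [{"to": [{"email": "recipient@example.com"}]}],
  "from": {"email": "sender@example.com"},
  "subject": "Hello from the marketing pool",
  "content": [{"type": "text/plain", "value": "Hello, world!"}],
  "ip_pool_name": "marketing"
}`)
response, err := sendgrid.API(request)
if err != nil {
  log.Println(err)
} else {
  fmt.Println(response.StatusCode)
}
```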
+ +### DELETE /ips/pools/{pool_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add an IP address to a pool + +**This endpoint allows you to add an IP address to an IP pool.** + +You can add the same IP address to multiple pools. It may take up to 60 seconds for your IP address to be added to a pool after your request is made. + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### POST /ips/pools/{pool_name}/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}/ips", host) +request.Method = "POST" +request.Body = []byte(` { + "ip": "0.0.0.0" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove an IP address from a pool. + +**This endpoint allows you to remove an IP address from an IP pool.** + +The same IP address can be added to multiple IP pools. + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### DELETE /ips/pools/{pool_name}/ips/{ip} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/pools/{pool_name}/ips/{ip}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add an IP to warmup + +**This endpoint allows you to enter an IP address into warmup mode.** + +SendGrid can automatically warm up dedicated IP addresses by limiting the amount of mail that can be sent through them per hour, with the limit determined by how long the IP address has been in warmup. See the [warmup schedule](https://sendgrid.com/docs/API_Reference/Web_API_v3/IP_Management/ip_warmup_schedule.html) for more details on how SendGrid limits your email traffic for IPs in warmup. + +For more general information about warming up IPs, please see our [Classroom](https://sendgrid.com/docs/Classroom/Deliver/Delivery_Introduction/warming_up_ips.html). + +### POST /ips/warmup + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/warmup", host) +request.Method = "POST" +request.Body = []byte(` { + "ip": "0.0.0.0" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all IPs currently in warmup + +**This endpoint allows you to retrieve all of your IP addresses that are currently warming up.** + +SendGrid can automatically warm up dedicated IP addresses by limiting the amount of mail that can be sent through them per hour, with the limit determined by how long the IP address has been in warmup. See the [warmup schedule](https://sendgrid.com/docs/API_Reference/Web_API_v3/IP_Management/ip_warmup_schedule.html) for more details on how SendGrid limits your email traffic for IPs in warmup. 
+ +For more general information about warming up IPs, please see our [Classroom](https://sendgrid.com/docs/Classroom/Deliver/Delivery_Introduction/warming_up_ips.html). + +### GET /ips/warmup + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/warmup", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve warmup status for a specific IP address + +**This endpoint allows you to retrieve the warmup status for a specific IP address.** + +SendGrid can automatically warm up dedicated IP addresses by limiting the amount of mail that can be sent through them per hour, with the limit determined by how long the IP address has been in warmup. See the [warmup schedule](https://sendgrid.com/docs/API_Reference/Web_API_v3/IP_Management/ip_warmup_schedule.html) for more details on how SendGrid limits your email traffic for IPs in warmup. + +For more general information about warming up IPs, please see our [Classroom](https://sendgrid.com/docs/Classroom/Deliver/Delivery_Introduction/warming_up_ips.html). + +### GET /ips/warmup/{ip_address} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/warmup/{ip_address}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove an IP from warmup + +**This endpoint allows you to remove an IP address from warmup mode.** + +SendGrid can automatically warm up dedicated IP addresses by limiting the amount of mail that can be sent through them per hour, with the limit determined by how long the IP address has been in warmup. See the [warmup schedule](https://sendgrid.com/docs/API_Reference/Web_API_v3/IP_Management/ip_warmup_schedule.html) for more details on how SendGrid limits your email traffic for IPs in warmup. + +For more general information about warming up IPs, please see our [Classroom](https://sendgrid.com/docs/Classroom/Deliver/Delivery_Introduction/warming_up_ips.html). + +### DELETE /ips/warmup/{ip_address} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/warmup/{ip_address}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all IP pools an IP address belongs to + +**This endpoint allows you to see which IP pools a particular IP address has been added to.** + +The same IP address can be added to multiple IP pools. + +A single IP address or a range of IP addresses may be dedicated to an account in order to send email for multiple domains. The reputation of this IP is based on the aggregate performance of all the senders who use it. + +### GET /ips/{ip_address} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/ips/{ip_address}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# MAIL + +## Create a batch ID + +**This endpoint allows you to generate a new batch ID. 
This batch ID can be associated with scheduled sends via the mail/send endpoint.** + +If you set the SMTPAPI header `batch_id`, it allows you to then associate multiple scheduled mail/send requests together with the same ID. Then at anytime up to 10 minutes before the schedule date, you can cancel all of the mail/send requests that have this batch ID by calling the Cancel Scheduled Send endpoint. + +More Information: + +* [Scheduling Parameters > Batch ID](https://sendgrid.com/docs/API_Reference/SMTP_API/scheduling_parameters.html) + +### POST /mail/batch + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail/batch", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Validate batch ID + +**This endpoint allows you to validate a batch ID.** + +If you set the SMTPAPI header `batch_id`, it allows you to then associate multiple scheduled mail/send requests together with the same ID. Then at anytime up to 10 minutes before the schedule date, you can cancel all of the mail/send requests that have this batch ID by calling the Cancel Scheduled Send endpoint. + +More Information: + +* [Scheduling Parameters > Batch ID](https://sendgrid.com/docs/API_Reference/SMTP_API/scheduling_parameters.html) + +### GET /mail/batch/{batch_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail/batch/{batch_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## v3 Mail Send + +This endpoint allows you to send email over SendGrids v3 Web API, the most recent version of our API. If you are looking for documentation about the v2 Mail Send endpoint, please see our [v2 API Reference](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). + +* Top level parameters are referred to as "global". +* Individual fields within the personalizations array will override any other global, or message level, parameters that are defined outside of personalizations. + +For an overview of the v3 Mail Send endpoint, please visit our [v3 API Reference](https://sendgrid.com/docs/API_Reference/Web_API_v3/Mail/index.html) + +For more detailed information about how to use the v3 Mail Send endpoint, please visit our [Classroom](https://sendgrid.com/docs/Classroom/Send/v3_Mail_Send/index.html). + +### POST /mail/send +This endpoint has a helper, check it out [here](https://github.com/sendgrid/sendgrid-go/blob/master/helpers/mail/README.md). + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail/send", host) +request.Method = "POST" +request.Body = []byte(` { + "asm": { + "group_id": 1, + "groups_to_display": [ + 1, + 2, + 3 + ] + }, + "attachments": [ + { + "content": "[BASE64 encoded content block here]", + "content_id": "ii_139db99fdb5c3704", + "disposition": "inline", + "filename": "file1.jpg", + "name": "file1", + "type": "jpg" + } + ], + "batch_id": "[YOUR BATCH ID GOES HERE]", + "categories": [ + "category1", + "category2" + ], + "content": [ + { + "type": "text/html", + "value": "
<html><p>Hello, world!</p></html>
" + } + ], + "custom_args": { + "New Argument 1": "New Value 1", + "activationAttempt": "1", + "customerAccountNumber": "[CUSTOMER ACCOUNT NUMBER GOES HERE]" + }, + "from": { + "email": "sam.smith@example.com", + "name": "Sam Smith" + }, + "headers": {}, + "ip_pool_name": "[YOUR POOL NAME GOES HERE]", + "mail_settings": { + "bcc": { + "email": "ben.doe@example.com", + "enable": true + }, + "bypass_list_management": { + "enable": true + }, + "footer": { + "enable": true, + "html": "
<p>Thanks<br>The SendGrid Team</p>
", + "text": "Thanks,/n The SendGrid Team" + }, + "sandbox_mode": { + "enable": false + }, + "spam_check": { + "enable": true, + "post_to_url": "http://example.com/compliance", + "threshold": 3 + } + }, + "personalizations": [ + { + "bcc": [ + { + "email": "sam.doe@example.com", + "name": "Sam Doe" + } + ], + "cc": [ + { + "email": "jane.doe@example.com", + "name": "Jane Doe" + } + ], + "custom_args": { + "New Argument 1": "New Value 1", + "activationAttempt": "1", + "customerAccountNumber": "[CUSTOMER ACCOUNT NUMBER GOES HERE]" + }, + "headers": { + "X-Accept-Language": "en", + "X-Mailer": "MyApp" + }, + "send_at": 1409348513, + "subject": "Hello, World!", + "substitutions": { + "id": "substitutions", + "type": "object" + }, + "to": [ + { + "email": "john.doe@example.com", + "name": "John Doe" + } + ] + } + ], + "reply_to": { + "email": "sam.smith@example.com", + "name": "Sam Smith" + }, + "sections": { + "section": { + ":sectionName1": "section 1 text", + ":sectionName2": "section 2 text" + } + }, + "send_at": 1409348513, + "subject": "Hello, World!", + "template_id": "[YOUR TEMPLATE ID GOES HERE]", + "tracking_settings": { + "click_tracking": { + "enable": true, + "enable_text": true + }, + "ganalytics": { + "enable": true, + "utm_campaign": "[NAME OF YOUR REFERRER SOURCE]", + "utm_content": "[USE THIS SPACE TO DIFFERENTIATE YOUR EMAIL FROM ADS]", + "utm_medium": "[NAME OF YOUR MARKETING MEDIUM e.g. email]", + "utm_name": "[NAME OF YOUR CAMPAIGN]", + "utm_term": "[IDENTIFY PAID KEYWORDS HERE]" + }, + "open_tracking": { + "enable": true, + "substitution_tag": "%opentrack" + }, + "subscription_tracking": { + "enable": true, + "html": "If you would like to unsubscribe and stop receiving these emails <% clickhere %>.", + "substitution_tag": "<%click here%>", + "text": "If you would like to unsubscribe and stop receiving these emails <% click here %>." + } + } +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# MAIL SETTINGS + +## Retrieve all mail settings + +**This endpoint allows you to retrieve a list of all mail settings.** + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update address whitelist mail settings + +**This endpoint allows you to update your current email address whitelist settings.** + +The address whitelist setting whitelists a specified email address or domain for which mail should never be suppressed. For example, you own the domain example.com, and one or more of your recipients use email@example.com addresses, by placing example.com in the address whitelist setting, all bounces, blocks, and unsubscribes logged for that domain will be ignored and sent as if under normal sending conditions. 
+ +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/address_whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/address_whitelist", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "list": [ + "email1@example.com", + "example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve address whitelist mail settings + +**This endpoint allows you to retrieve your current email address whitelist settings.** + +The address whitelist setting whitelists a specified email address or domain for which mail should never be suppressed. For example, you own the domain example.com, and one or more of your recipients use email@example.com addresses, by placing example.com in the address whitelist setting, all bounces, blocks, and unsubscribes logged for that domain will be ignored and sent as if under normal sending conditions. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/address_whitelist + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/address_whitelist", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update BCC mail settings + +**This endpoint allows you to update your current BCC mail settings.** + +When the BCC mail setting is enabled, SendGrid will automatically send a blind carbon copy (BCC) to an address for every email sent without adding that address to the header. Please note that only one email address may be entered in this field, if you wish to distribute BCCs to multiple addresses you will need to create a distribution group or use forwarding rules. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/bcc + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bcc", host) +request.Method = "PATCH" +request.Body = []byte(` { + "email": "email@example.com", + "enabled": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all BCC mail settings + +**This endpoint allows you to retrieve your current BCC mail settings.** + +When the BCC mail setting is enabled, SendGrid will automatically send a blind carbon copy (BCC) to an address for every email sent without adding that address to the header. 
Please note that only one email address may be entered in this field, if you wish to distribute BCCs to multiple addresses you will need to create a distribution group or use forwarding rules. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/bcc + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bcc", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update bounce purge mail settings + +**This endpoint allows you to update your current bounce purge settings.** + +This setting allows you to set a schedule for SendGrid to automatically delete contacts from your soft and hard bounce suppression lists. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/bounce_purge + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bounce_purge", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "hard_bounces": 5, + "soft_bounces": 5 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve bounce purge mail settings + +**This endpoint allows you to retrieve your current bounce purge settings.** + +This setting allows you to set a schedule for SendGrid to automatically delete contacts from your soft and hard bounce suppression lists. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/bounce_purge + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/bounce_purge", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update footer mail settings + +**This endpoint allows you to update your current Footer mail settings.** + +The footer setting will insert a custom footer at the bottom of the text and HTML bodies. Use the embedded HTML editor and plain text entry fields to create the content of the footers to be inserted into your emails. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/footer + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/footer", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "html_content": "...", + "plain_content": "..." 
+}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve footer mail settings + +**This endpoint allows you to retrieve your current Footer mail settings.** + +The footer setting will insert a custom footer at the bottom of the text and HTML bodies. Use the embedded HTML editor and plain text entry fields to create the content of the footers to be inserted into your emails. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/footer + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/footer", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update forward bounce mail settings + +**This endpoint allows you to update your current bounce forwarding mail settings.** + +Activating this setting allows you to specify an email address to which bounce reports are forwarded. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/forward_bounce + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_bounce", host) +request.Method = "PATCH" +request.Body = []byte(` { + "email": "example@example.com", + "enabled": true +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve forward bounce mail settings + +**This endpoint allows you to retrieve your current bounce forwarding mail settings.** + +Activating this setting allows you to specify an email address to which bounce reports are forwarded. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/forward_bounce + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_bounce", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update forward spam mail settings + +**This endpoint allows you to update your current Forward Spam mail settings.** + +Enabling the forward spam setting allows you to specify an email address to which spam reports will be forwarded. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). 
+ +### PATCH /mail_settings/forward_spam + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_spam", host) +request.Method = "PATCH" +request.Body = []byte(` { + "email": "", + "enabled": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve forward spam mail settings + +**This endpoint allows you to retrieve your current Forward Spam mail settings.** + +Enabling the forward spam setting allows you to specify an email address to which spam reports will be forwarded. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/forward_spam + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/forward_spam", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update plain content mail settings + +**This endpoint allows you to update your current Plain Content mail settings.** + +The plain content setting will automatically convert any plain text emails that you send to HTML before sending. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/plain_content + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/plain_content", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve plain content mail settings + +**This endpoint allows you to retrieve your current Plain Content mail settings.** + +The plain content setting will automatically convert any plain text emails that you send to HTML before sending. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/plain_content + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/plain_content", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update spam check mail settings + +**This endpoint allows you to update your current spam checker mail settings.** + +The spam checker filter notifies you when emails are detected that exceed a predefined spam threshold. 
+ +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/spam_check + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/spam_check", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "max_score": 5, + "url": "url" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve spam check mail settings + +**This endpoint allows you to retrieve your current Spam Checker mail settings.** + +The spam checker filter notifies you when emails are detected that exceed a predefined spam threshold. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### GET /mail_settings/spam_check + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/spam_check", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update template mail settings + +**This endpoint allows you to update your current legacy email template settings.** + +This setting refers to our original email templates. We currently support more fully featured [transactional templates](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +The legacy email template setting wraps an HTML template around your email content. This can be useful for sending out marketing email and/or other HTML formatted messages. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). + +### PATCH /mail_settings/template + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/template", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "html_content": "<% body %>" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve legacy template mail settings + +**This endpoint allows you to retrieve your current legacy email template settings.** + +This setting refers to our original email templates. We currently support more fully featured [transactional templates](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +The legacy email template setting wraps an HTML template around your email content. This can be useful for sending out marketing email and/or other HTML formatted messages. + +Mail settings allow you to tell SendGrid specific things to do to every email that you send to your recipients over SendGrids [Web API](https://sendgrid.com/docs/API_Reference/Web_API/mail.html) or [SMTP Relay](https://sendgrid.com/docs/API_Reference/SMTP_API/index.html). 
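The reference snippets print `response.Body` as a raw string; if typed access is more convenient, the settings payload can be decoded with `encoding/json`. A sketch for the template setting returned by the GET below, assuming a struct that mirrors the PATCH body shown above (the exact response schema is not spelled out here, so treat the field set as an assumption):

```go
// Assumed shape, mirroring the PATCH body above; not a documented schema.
type templateSetting struct {
  Enabled     bool   `json:"enabled"`
  HTMLContent string `json:"html_content"`
}

request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/template", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
  log.Println(err)
  return
}

var setting templateSetting
if err := json.Unmarshal([]byte(response.Body), &setting); err != nil {
  log.Println(err)
  return
}
fmt.Println(setting.Enabled, setting.HTMLContent)
```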
+ +### GET /mail_settings/template + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mail_settings/template", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# MAILBOX PROVIDERS + +## Retrieve email statistics by mailbox provider. + +**This endpoint allows you to retrieve your email statistics segmented by recipient mailbox provider.** + +**We only store up to 7 days of email activity in our database.** By default, 500 items will be returned per request via the Advanced Stats API endpoints. + +Advanced Stats provide a more in-depth view of your email statistics and the actions taken by your recipients. You can segment these statistics by geographic location, device type, client type, browser, and mailbox provider. For more information about statistics, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/index.html). + +### GET /mailbox_providers/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/mailbox_providers/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["mailbox_providers"] = "test_string" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# PARTNER SETTINGS + +## Returns a list of all partner settings. + +**This endpoint allows you to retrieve a list of all partner settings that you can enable.** + +Our partner settings allow you to integrate your SendGrid account with our partners to increase your SendGrid experience and functionality. For more information about our partners, and how you can begin integrating with them, please visit our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/partners.html). + +### GET /partner_settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/partner_settings", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Updates New Relic partner settings. + +**This endpoint allows you to update or change your New Relic partner settings.** + +Our partner settings allow you to integrate your SendGrid account with our partners to increase your SendGrid experience and functionality. For more information about our partners, and how you can begin integrating with them, please visit our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/partners.html). + +By integrating with New Relic, you can send your SendGrid email statistics to your New Relic Dashboard. If you enable this setting, your stats will be sent to New Relic every 5 minutes. You will need your New Relic License Key to enable this setting. For more information, please see our [Classroom](https://sendgrid.com/docs/Classroom/Track/Collecting_Data/new_relic.html). 
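Rather than pasting the license key into a raw string literal like the reference snippet below, the PATCH body can be built programmatically and the key pulled from the environment. A sketch; the `NEW_RELIC_LICENSE_KEY` variable name is only an illustration, and `os` and `encoding/json` are assumed to be imported:

```go
licenseKey := os.Getenv("NEW_RELIC_LICENSE_KEY") // illustrative variable name

body, err := json.Marshal(map[string]interface{}{
  "enabled":                   true,
  "enable_subuser_statistics": false,
  "license_key":               licenseKey,
})
if err != nil {
  log.Println(err)
  return
}

request := sendgrid.GetRequest(apiKey, "/v3/partner_settings/new_relic", host)
request.Method = "PATCH"
request.Body = body
response, err := sendgrid.API(request)
if err != nil {
  log.Println(err)
} else {
  fmt.Println(response.StatusCode)
}
```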
+ +### PATCH /partner_settings/new_relic + +```go +request := sendgrid.GetRequest(apiKey, "/v3/partner_settings/new_relic", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enable_subuser_statistics": true, + "enabled": true, + "license_key": "" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Returns all New Relic partner settings. + +**This endpoint allows you to retrieve your current New Relic partner settings.** + +Our partner settings allow you to integrate your SendGrid account with our partners to increase your SendGrid experience and functionality. For more information about our partners, and how you can begin integrating with them, please visit our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/partners.html). + +By integrating with New Relic, you can send your SendGrid email statistics to your New Relic Dashboard. If you enable this setting, your stats will be sent to New Relic every 5 minutes. You will need your New Relic License Key to enable this setting. For more information, please see our [Classroom](https://sendgrid.com/docs/Classroom/Track/Collecting_Data/new_relic.html). + +### GET /partner_settings/new_relic + +```go +request := sendgrid.GetRequest(apiKey, "/v3/partner_settings/new_relic", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# SCOPES + +## Retrieve a list of scopes for which this user has access. + +**This endpoint returns a list of all scopes that this user has access to.** + +API Keys can be used to authenticate the use of [SendGrids v3 Web API](https://sendgrid.com/docs/API_Reference/Web_API_v3/index.html), or the [Mail API Endpoint](https://sendgrid.com/docs/API_Reference/Web_API/mail.html). API Keys may be assigned certain permissions, or scopes, that limit which API endpoints they are able to access. For a more detailed explanation of how you can use API Key permissions, please visit our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/api_keys.html#-API-Key-Permissions) or [Classroom](https://sendgrid.com/docs/Classroom/Basics/API/api_key_permissions.html). + +### GET /scopes + +```go +request := sendgrid.GetRequest(apiKey, "/v3/scopes", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# SENDERS + +## Create a Sender Identity + +**This endpoint allows you to create a new sender identity.** + +*You may create up to 100 unique sender identities.* + +Sender Identities are required to be verified before use. If your domain has been whitelabeled it will auto verify on creation. Otherwise an email will be sent to the `from.email`. + +### POST /senders + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders", host) +request.Method = "POST" +request.Body = []byte(` { + "address": "123 Elm St.", + "address_2": "Apt. 
456", + "city": "Denver", + "country": "United States", + "from": { + "email": "from@example.com", + "name": "Example INC" + }, + "nickname": "My Sender ID", + "reply_to": { + "email": "replyto@example.com", + "name": "Example INC" + }, + "state": "Colorado", + "zip": "80202" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get all Sender Identities + +**This endpoint allows you to retrieve a list of all sender identities that have been created for your account.** + +Sender Identities are required to be verified before use. If your domain has been whitelabeled it will auto verify on creation. Otherwise an email will be sent to the `from.email`. + +### GET /senders + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a Sender Identity + +**This endpoint allows you to update a sender identity.** + +Updates to `from.email` require re-verification. If your domain has been whitelabeled it will auto verify on creation. Otherwise an email will be sent to the `from.email`. + +Partial updates are allowed, but fields that are marked as "required" in the POST (create) endpoint must not be nil if that field is included in the PATCH request. + +### PATCH /senders/{sender_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "address": "123 Elm St.", + "address_2": "Apt. 456", + "city": "Denver", + "country": "United States", + "from": { + "email": "from@example.com", + "name": "Example INC" + }, + "nickname": "My Sender ID", + "reply_to": { + "email": "replyto@example.com", + "name": "Example INC" + }, + "state": "Colorado", + "zip": "80202" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## View a Sender Identity + +**This endpoint allows you to retrieve a specific sender identity.** + +Sender Identities are required to be verified before use. If your domain has been whitelabeled it will auto verify on creation. Otherwise an email will be sent to the `from.email`. + +### GET /senders/{sender_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Sender Identity + +**This endpoint allows you to delete one of your sender identities.** + +Sender Identities are required to be verified before use. If your domain has been whitelabeled it will auto verify on creation. Otherwise an email will be sent to the `from.email`. 
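Path placeholders such as `{sender_id}` need to be replaced with a real value before the request goes out. A small sketch for the delete call below; the id is made up for illustration and would normally come from the create or list responses above:

```go
senderID := 12345 // illustrative; use the id returned when the sender identity was created
path := fmt.Sprintf("/v3/senders/%d", senderID)

request := sendgrid.GetRequest(apiKey, path, host)
request.Method = "DELETE"
response, err := sendgrid.API(request)
if err != nil {
  log.Println(err)
} else {
  fmt.Println(response.StatusCode)
}
```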
+ +### DELETE /senders/{sender_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Resend Sender Identity Verification + +**This endpoint allows you to resend a sender identity verification email.** + +Sender Identities are required to be verified before use. If your domain has been whitelabeled it will auto verify on creation. Otherwise an email will be sent to the `from.email`. + +### POST /senders/{sender_id}/resend_verification + +```go +request := sendgrid.GetRequest(apiKey, "/v3/senders/{sender_id}/resend_verification", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# STATS + +## Retrieve global email statistics + +**This endpoint allows you to retrieve all of your global email statistics between a given date range.** + +Parent accounts will see aggregated stats for their account and all subuser accounts. Subuser accounts will only see their own stats. + +### GET /stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["end_date"] = "2016-04-01" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# SUBUSERS + +## Create Subuser + +This endpoint allows you to retrieve a list of all of your subusers. You can choose to retrieve specific subusers as well as limit the results that come back from the API. + +For more information about Subusers: + +* [User Guide > Subusers](https://sendgrid.com/docs/User_Guide/Settings/Subusers/index.html) +* [Classroom > How do I add more subusers to my account?](https://sendgrid.com/docs/Classroom/Basics/Account/how_do_i_add_more_subusers_to_my_account.html) + +### POST /subusers + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers", host) +request.Method = "POST" +request.Body = []byte(` { + "email": "John@example.com", + "ips": [ + "1.1.1.1", + "2.2.2.2" + ], + "password": "johns_password", + "username": "John@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## List all Subusers + +This endpoint allows you to retrieve a list of all of your subusers. You can choose to retrieve specific subusers as well as limit the results that come back from the API. 
+ +For more information about Subusers: + +* [User Guide > Subusers](https://sendgrid.com/docs/User_Guide/Settings/Subusers/index.html) +* [Classroom > How do I add more subusers to my account?](https://sendgrid.com/docs/Classroom/Basics/Account/how_do_i_add_more_subusers_to_my_account.html) + +### GET /subusers + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["username"] = "test_string" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Subuser Reputations + +Subuser sender reputations give a good idea how well a sender is doing with regards to how recipients and recipient servers react to the mail that is being received. When a bounce, spam report, or other negative action happens on a sent email, it will effect your sender rating. + +This endpoint allows you to request the reputations for your subusers. + +### GET /subusers/reputations + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/reputations", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["usernames"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve email statistics for your subusers. + +**This endpoint allows you to retrieve the email statistics for the given subusers.** + +You may retrieve statistics for up to 10 different subusers by including an additional _subusers_ parameter for each additional subuser. + +While you can always view the statistics for all email activity on your account, subuser statistics enable you to view specific segments of your stats. Emails sent, bounces, and spam reports are always tracked for subusers. Unsubscribes, clicks, and opens are tracked if you have enabled the required settings. + +For more information, see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/subuser.html). + +### GET /subusers/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["subusers"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve monthly stats for all subusers + +**This endpoint allows you to retrieve the monthly email statistics for all subusers over the given date range.** + +While you can always view the statistics for all email activity on your account, subuser statistics enable you to view specific segments of your stats for your subusers. Emails sent, bounces, and spam reports are always tracked for subusers. Unsubscribes, clicks, and opens are tracked if you have enabled the required settings. 
+ +When using the `sort_by_metric` to sort your stats by a specific metric, you can not sort by the following metrics: +`bounce_drops`, `deferred`, `invalid_emails`, `processed`, `spam_report_drops`, `spam_reports`, or `unsubscribe_drops`. + +For more information, see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/subuser.html). + +### GET /subusers/stats/monthly + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/stats/monthly", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["subuser"] = "test_string" +queryParams["limit"] = "1" +queryParams["sort_by_metric"] = "test_string" +queryParams["offset"] = "1" +queryParams["date"] = "test_string" +queryParams["sort_by_direction"] = "asc" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve the totals for each email statistic metric for all subusers. + +**This endpoint allows you to retrieve the total sums of each email statistic metric for all subusers over the given date range.** + + +While you can always view the statistics for all email activity on your account, subuser statistics enable you to view specific segments of your stats. Emails sent, bounces, and spam reports are always tracked for subusers. Unsubscribes, clicks, and opens are tracked if you have enabled the required settings. + +For more information, see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/subuser.html). + +### GET /subusers/stats/sums + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/stats/sums", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["end_date"] = "2016-04-01" +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "1" +queryParams["sort_by_metric"] = "test_string" +queryParams["offset"] = "1" +queryParams["start_date"] = "2016-01-01" +queryParams["sort_by_direction"] = "asc" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Enable/disable a subuser + +This endpoint allows you to enable or disable a subuser. + +For more information about Subusers: + +* [User Guide > Subusers](https://sendgrid.com/docs/User_Guide/Settings/Subusers/index.html) +* [Classroom > How do I add more subusers to my account?](https://sendgrid.com/docs/Classroom/Basics/Account/how_do_i_add_more_subusers_to_my_account.html) + +### PATCH /subusers/{subuser_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "disabled": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a subuser + +This endpoint allows you to delete a subuser. This is a permanent action, once deleted a subuser cannot be retrieved. 
+ +For more information about Subusers: + +* [User Guide > Subusers](https://sendgrid.com/docs/User_Guide/Settings/Subusers/index.html) +* [Classroom > How do I add more subusers to my account?](https://sendgrid.com/docs/Classroom/Basics/Account/how_do_i_add_more_subusers_to_my_account.html) + +### DELETE /subusers/{subuser_name} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update IPs assigned to a subuser + +Each subuser should be assigned to an IP address, from which all of this subuser's mail will be sent. Often, this is the same IP as the parent account, but each subuser can have their own, or multiple, IP addresses as well. + +More information: + +* [How to request more IPs](https://sendgrid.com/docs/Classroom/Basics/Account/adding_an_additional_dedicated_ip_to_your_account.html) +* [IPs can be whitelabeled](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/ips.html) + +### PUT /subusers/{subuser_name}/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/ips", host) +request.Method = "PUT" +request.Body = []byte(` [ + "127.0.0.1" +]`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Monitor Settings for a subuser + +Subuser monitor settings allow you to receive a sample of an outgoing message by a specific customer at a specific frequency of emails. + +### PUT /subusers/{subuser_name}/monitor + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/monitor", host) +request.Method = "PUT" +request.Body = []byte(` { + "email": "example@example.com", + "frequency": 500 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create monitor settings + +Subuser monitor settings allow you to receive a sample of an outgoing message by a specific customer at a specific frequency of emails. + +### POST /subusers/{subuser_name}/monitor + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/monitor", host) +request.Method = "POST" +request.Body = []byte(` { + "email": "example@example.com", + "frequency": 50000 +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve monitor settings for a subuser + +Subuser monitor settings allow you to receive a sample of an outgoing message by a specific customer at a specific frequency of emails. + +### GET /subusers/{subuser_name}/monitor + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/monitor", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete monitor settings + +Subuser monitor settings allow you to receive a sample of an outgoing message by a specific customer at a specific frequency of emails. 
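One thing the reference snippets gloss over is that `sendgrid.API` only returns an error for transport-level failures; a 4xx or 5xx reply still comes back with a nil error and has to be detected from the status code. A sketch using the delete call below; the subuser name is illustrative:

```go
request := sendgrid.GetRequest(apiKey, "/v3/subusers/example_subuser/monitor", host)
request.Method = "DELETE"

response, err := sendgrid.API(request)
if err != nil {
  // Transport failure: DNS, TLS, timeout, etc.
  log.Println(err)
  return
}
if response.StatusCode >= 400 {
  // The API call itself was rejected; the body usually explains why.
  log.Printf("delete monitor settings failed: %d %s", response.StatusCode, response.Body)
  return
}
fmt.Println("monitor settings removed, status:", response.StatusCode)
```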
+ +### DELETE /subusers/{subuser_name}/monitor + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/monitor", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve the monthly email statistics for a single subuser + +**This endpoint allows you to retrieve the monthly email statistics for a specific subuser.** + +While you can always view the statistics for all email activity on your account, subuser statistics enable you to view specific segments of your stats for your subusers. Emails sent, bounces, and spam reports are always tracked for subusers. Unsubscribes, clicks, and opens are tracked if you have enabled the required settings. + +When using the `sort_by_metric` to sort your stats by a specific metric, you can not sort by the following metrics: +`bounce_drops`, `deferred`, `invalid_emails`, `processed`, `spam_report_drops`, `spam_reports`, or `unsubscribe_drops`. + +For more information, see our [User Guide](https://sendgrid.com/docs/User_Guide/Statistics/subuser.html). + +### GET /subusers/{subuser_name}/stats/monthly + +```go +request := sendgrid.GetRequest(apiKey, "/v3/subusers/{subuser_name}/stats/monthly", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["date"] = "test_string" +queryParams["sort_by_direction"] = "asc" +queryParams["limit"] = "1" +queryParams["sort_by_metric"] = "test_string" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# SUPPRESSION + +## Retrieve all blocks + +**This endpoint allows you to retrieve a list of all email addresses that are currently on your blocks list.** + +[Blocks](https://sendgrid.com/docs/Glossary/blocks.html) happen when your message was rejected for a reason related to the message, not the recipient address. This can happen when your mail server IP address has been added to a blacklist or blocked by an ISP, or if the message content is flagged by a filter on the receiving server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/blocks.html). + +### GET /suppression/blocks + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/blocks", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["limit"] = "1" +queryParams["end_time"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete blocks + +**This endpoint allows you to delete all email addresses on your blocks list.** + +There are two options for deleting blocked emails: + +1. You can delete all blocked emails by setting `delete_all` to true in the request body. +2. You can delete some blocked emails by specifying the email addresses in an array in the request body. + +[Blocks](https://sendgrid.com/docs/Glossary/blocks.html) happen when your message was rejected for a reason related to the message, not the recipient address. 
This can happen when your mail server IP address has been added to a blacklist or blocked by an ISP, or if the message content is flagged by a filter on the receiving server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/blocks.html). + +### DELETE /suppression/blocks + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/blocks", host) +request.Method = "DELETE" +request.Body = []byte(` { + "delete_all": false, + "emails": [ + "example1@example.com", + "example2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific block + +**This endpoint allows you to retrieve a specific email address from your blocks list.** + +[Blocks](https://sendgrid.com/docs/Glossary/blocks.html) happen when your message was rejected for a reason related to the message, not the recipient address. This can happen when your mail server IP address has been added to a blacklist or blocked by an ISP, or if the message content is flagged by a filter on the receiving server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/blocks.html). + +### GET /suppression/blocks/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/blocks/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a specific block + +**This endpoint allows you to delete a specific email address from your blocks list.** + +[Blocks](https://sendgrid.com/docs/Glossary/blocks.html) happen when your message was rejected for a reason related to the message, not the recipient address. This can happen when your mail server IP address has been added to a blacklist or blocked by an ISP, or if the message content is flagged by a filter on the receiving server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/blocks.html). + +### DELETE /suppression/blocks/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/blocks/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all bounces + +**This endpoint allows you to retrieve all of your bounces.** + +Bounces are messages that are returned to the server that sent it. + +For more information see: + +* [User Guide > Bounces](https://sendgrid.com/docs/User_Guide/Suppressions/bounces.html) for more information +* [Glossary > Bounces](https://sendgrid.com/docs/Glossary/Bounces.html) + +### GET /suppression/bounces + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/bounces", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["end_time"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete bounces + +**This endpoint allows you to delete all of your bounces. 
You can also use this endpoint to remove a specific email address from your bounce list.** + +Bounces are messages that are returned to the server that sent it. + +For more information see: + +* [User Guide > Bounces](https://sendgrid.com/docs/User_Guide/Suppressions/bounces.html) for more information +* [Glossary > Bounces](https://sendgrid.com/docs/Glossary/Bounces.html) +* [Classroom > List Scrubbing Guide](https://sendgrid.com/docs/Classroom/Deliver/list_scrubbing.html) + +Note: the `delete_all` and `emails` parameters should be used independently of each other as they have different purposes. + +### DELETE /suppression/bounces + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/bounces", host) +request.Method = "DELETE" +request.Body = []byte(` { + "delete_all": true, + "emails": [ + "example@example.com", + "example2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Bounce + +**This endpoint allows you to retrieve a specific bounce for a given email address.** + +Bounces are messages that are returned to the server that sent it. + +For more information see: + +* [User Guide > Bounces](https://sendgrid.com/docs/User_Guide/Suppressions/bounces.html) for more information +* [Glossary > Bounces](https://sendgrid.com/docs/Glossary/Bounces.html) +* [Classroom > List Scrubbing Guide](https://sendgrid.com/docs/Classroom/Deliver/list_scrubbing.html) + +### GET /suppression/bounces/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/bounces/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a bounce + +**This endpoint allows you to remove an email address from your bounce list.** + +Bounces are messages that are returned to the server that sent it. This endpoint allows you to delete a single email addresses from your bounce list. + +For more information see: + +* [User Guide > Bounces](https://sendgrid.com/docs/User_Guide/Suppressions/bounces.html) for more information +* [Glossary > Bounces](https://sendgrid.com/docs/Glossary/Bounces.html) +* [Classroom > List Scrubbing Guide](https://sendgrid.com/docs/Classroom/Deliver/list_scrubbing.html) + +### DELETE /suppression/bounces/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/bounces/{email}", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["email_address"] = "example@example.com" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all invalid emails + +**This endpoint allows you to retrieve a list of all invalid email addresses.** + +An invalid email occurs when you attempt to send email to an address that is formatted in a manner that does not meet internet email format standards or the email does not exist at the recipients mail server. + +Examples include addresses without the @ sign or addresses that include certain special characters and/or spaces. This response can come from our own server or the recipient mail server. 
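+
+The `start_time` and `end_time` parameters accepted by the request below are Unix timestamps. Rather than hard-coding them, you can derive them with the standard `time` package; a minimal sketch (assuming `strconv` and `time` are imported, and that the endpoint expects second resolution, which you should confirm against the API reference):
+
+```go
+// Query the most recent seven days of invalid email addresses.
+end := time.Now()
+start := end.AddDate(0, 0, -7)
+queryParams := make(map[string]string)
+queryParams["start_time"] = strconv.FormatInt(start.Unix(), 10)
+queryParams["end_time"] = strconv.FormatInt(end.Unix(), 10)
+```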
+ +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/invalid_emails.html). + +### GET /suppression/invalid_emails + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/invalid_emails", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["limit"] = "1" +queryParams["end_time"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete invalid emails + +**This endpoint allows you to remove email addresses from your invalid email address list.** + +There are two options for deleting invalid email addresses: + +1) You can delete all invalid email addresses by setting `delete_all` to true in the request body. +2) You can delete some invalid email addresses by specifying certain addresses in an array in the request body. + +An invalid email occurs when you attempt to send email to an address that is formatted in a manner that does not meet internet email format standards or the email does not exist at the recipients mail server. + +Examples include addresses without the @ sign or addresses that include certain special characters and/or spaces. This response can come from our own server or the recipient mail server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/invalid_emails.html). + +### DELETE /suppression/invalid_emails + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/invalid_emails", host) +request.Method = "DELETE" +request.Body = []byte(` { + "delete_all": false, + "emails": [ + "example1@example.com", + "example2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific invalid email + +**This endpoint allows you to retrieve a specific invalid email addresses.** + +An invalid email occurs when you attempt to send email to an address that is formatted in a manner that does not meet internet email format standards or the email does not exist at the recipients mail server. + +Examples include addresses without the @ sign or addresses that include certain special characters and/or spaces. This response can come from our own server or the recipient mail server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/invalid_emails.html). + +### GET /suppression/invalid_emails/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/invalid_emails/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a specific invalid email + +**This endpoint allows you to remove a specific email address from the invalid email address list.** + +An invalid email occurs when you attempt to send email to an address that is formatted in a manner that does not meet internet email format standards or the email does not exist at the recipients mail server. + +Examples include addresses without the @ sign or addresses that include certain special characters and/or spaces. 
This response can come from our own server or the recipient mail server. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/invalid_emails.html). + +### DELETE /suppression/invalid_emails/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/invalid_emails/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific spam report + +**This endpoint allows you to retrieve a specific spam report.** + +[Spam reports](https://sendgrid.com/docs/Glossary/spam_reports.html) happen when a recipient indicates that they think your email is [spam](https://sendgrid.com/docs/Glossary/spam.html) and then their email provider reports this to SendGrid. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/spam_reports.html). + +### GET /suppression/spam_report/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/spam_report/{email}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a specific spam report + +**This endpoint allows you to delete a specific spam report.** + +[Spam reports](https://sendgrid.com/docs/Glossary/spam_reports.html) happen when a recipient indicates that they think your email is [spam](https://sendgrid.com/docs/Glossary/spam.html) and then their email provider reports this to SendGrid. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/spam_reports.html). + +### DELETE /suppression/spam_report/{email} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/spam_report/{email}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all spam reports + +**This endpoint allows you to retrieve all spam reports.** + +[Spam reports](https://sendgrid.com/docs/Glossary/spam_reports.html) happen when a recipient indicates that they think your email is [spam](https://sendgrid.com/docs/Glossary/spam.html) and then their email provider reports this to SendGrid. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/spam_reports.html). + +### GET /suppression/spam_reports + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/spam_reports", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["limit"] = "1" +queryParams["end_time"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete spam reports + +**This endpoint allows you to delete your spam reports.** + +There are two options for deleting spam reports: + +1) You can delete all spam reports by setting "delete_all" to true in the request body. +2) You can delete some spam reports by specifying the email addresses in an array in the request body. 
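+
+For the second option, the request body shown in the example below can also be built from a struct rather than a hand-written JSON literal; a minimal sketch (assuming `encoding/json` is imported; the field names mirror the example):
+
+```go
+// deleteSpamReportsBody mirrors the JSON body accepted by DELETE /suppression/spam_reports.
+type deleteSpamReportsBody struct {
+  DeleteAll bool     `json:"delete_all"`
+  Emails    []string `json:"emails,omitempty"`
+}
+
+body, err := json.Marshal(deleteSpamReportsBody{
+  Emails: []string{"example1@example.com", "example2@example.com"},
+})
+if err != nil {
+  log.Println(err)
+}
+request := sendgrid.GetRequest(apiKey, "/v3/suppression/spam_reports", host)
+request.Method = "DELETE"
+request.Body = body
+```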
+ +[Spam reports](https://sendgrid.com/docs/Glossary/spam_reports.html) happen when a recipient indicates that they think your email is [spam](https://sendgrid.com/docs/Glossary/spam.html) and then their email provider reports this to SendGrid. + +For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/spam_reports.html). + +### DELETE /suppression/spam_reports + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/spam_reports", host) +request.Method = "DELETE" +request.Body = []byte(` { + "delete_all": false, + "emails": [ + "example1@example.com", + "example2@example.com" + ] +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all global suppressions + +**This endpoint allows you to retrieve a list of all email address that are globally suppressed.** + +A global suppression (or global unsubscribe) is an email address of a recipient who does not want to receive any of your messages. A globally suppressed recipient will be removed from any email you send. For more information, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Suppressions/global_unsubscribes.html). + +### GET /suppression/unsubscribes + +```go +request := sendgrid.GetRequest(apiKey, "/v3/suppression/unsubscribes", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["start_time"] = "1" +queryParams["limit"] = "1" +queryParams["end_time"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# TEMPLATES + +## Create a transactional template. + +**This endpoint allows you to create a transactional template.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +### POST /templates + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates", host) +request.Method = "POST" +request.Body = []byte(` { + "name": "example_name" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all transactional templates. + +**This endpoint allows you to retrieve all transactional templates.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). 
For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +### GET /templates + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Edit a transactional template. + +**This endpoint allows you to edit a transactional template.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + + +### PATCH /templates/{template_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "name": "new_example_name" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a single transactional template. + +**This endpoint allows you to retrieve a single transactional template.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + + +### GET /templates/{template_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a template. + +**This endpoint allows you to delete a transactional template.** + +Each user can create up to 300 different transactional templates. Transactional templates are specific to accounts and subusers. Templates created on a parent account will not be accessible from the subuser accounts. + +Transactional templates are templates created specifically for transactional email and are not to be confused with [Marketing Campaigns templates](https://sendgrid.com/docs/User_Guide/Marketing_Campaigns/templates.html). For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). 
+ + +### DELETE /templates/{template_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a new transactional template version. + +**This endpoint allows you to create a new version of a template.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across all templates. + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + + +### POST /templates/{template_id}/versions + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions", host) +request.Method = "POST" +request.Body = []byte(` { + "active": 1, + "html_content": "<%body%>", + "name": "example_version_name", + "plain_content": "<%body%>", + "subject": "<%subject%>", + "template_id": "ddb96bbc-9b92-425e-8979-99464621b543" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Edit a transactional template version. + +**This endpoint allows you to edit a version of one of your transactional templates.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across all templates. + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| template_id | string | The ID of the original template | +| version_id | string | The ID of the template version | + +### PATCH /templates/{template_id}/versions/{version_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions/{version_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "active": 1, + "html_content": "<%body%>", + "name": "updated_example_name", + "plain_content": "<%body%>", + "subject": "<%subject%>" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific transactional template version. + +**This endpoint allows you to retrieve a specific version of a template.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across across all templates. + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). 
+ +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| template_id | string | The ID of the original template | +| version_id | string | The ID of the template version | + +### GET /templates/{template_id}/versions/{version_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions/{version_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a transactional template version. + +**This endpoint allows you to delete one of your transactional template versions.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across across all templates. + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| template_id | string | The ID of the original template | +| version_id | string | The ID of the template version | + +### DELETE /templates/{template_id}/versions/{version_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions/{version_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Activate a transactional template version. + +**This endpoint allows you to activate a version of one of your templates.** + +Each transactional template can have multiple versions, each version with its own subject and content. Each user can have up to 300 versions across across all templates. + + +For more information about transactional templates, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| template_id | string | The ID of the original template | +| version_id | string | The ID of the template version | + +### POST /templates/{template_id}/versions/{version_id}/activate + +```go +request := sendgrid.GetRequest(apiKey, "/v3/templates/{template_id}/versions/{version_id}/activate", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# TRACKING SETTINGS + +## Retrieve Tracking Settings + +**This endpoint allows you to retrieve a list of all tracking settings that you can enable on your account.** + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). 
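+
+The `limit` and `offset` query parameters in the request below are passed as strings; if you compute them at runtime, convert them with `strconv` (a minimal sketch, assuming `strconv` is imported):
+
+```go
+limit := 25
+offset := 0
+queryParams := make(map[string]string)
+queryParams["limit"] = strconv.Itoa(limit)
+queryParams["offset"] = strconv.Itoa(offset)
+```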
+ +### GET /tracking_settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Click Tracking Settings + +**This endpoint allows you to change your current click tracking setting. You can enable, or disable, click tracking using this endpoint.** + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### PATCH /tracking_settings/click + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/click", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Click Track Settings + +**This endpoint allows you to retrieve your current click tracking setting.** + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### GET /tracking_settings/click + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/click", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Google Analytics Settings + +**This endpoint allows you to update your current setting for Google Analytics.** + +For more information about using Google Analytics, please refer to [Googles URL Builder](https://support.google.com/analytics/answer/1033867?hl=en) and their article on ["Best Practices for Campaign Building"](https://support.google.com/analytics/answer/1037445). + +We default the settings to Googles recommendations. For more information, see [Google Analytics Demystified](https://sendgrid.com/docs/Classroom/Track/Collecting_Data/google_analytics_demystified_ga_statistics_vs_sg_statistics.html). + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). 
+ +### PATCH /tracking_settings/google_analytics + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/google_analytics", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "utm_campaign": "website", + "utm_content": "", + "utm_medium": "email", + "utm_source": "sendgrid.com", + "utm_term": "" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Google Analytics Settings + +**This endpoint allows you to retrieve your current setting for Google Analytics.** + +For more information about using Google Analytics, please refer to [Googles URL Builder](https://support.google.com/analytics/answer/1033867?hl=en) and their article on ["Best Practices for Campaign Building"](https://support.google.com/analytics/answer/1037445). + +We default the settings to Googles recommendations. For more information, see [Google Analytics Demystified](https://sendgrid.com/docs/Classroom/Track/Collecting_Data/google_analytics_demystified_ga_statistics_vs_sg_statistics.html). + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### GET /tracking_settings/google_analytics + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/google_analytics", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Open Tracking Settings + +**This endpoint allows you to update your current settings for open tracking.** + +Open Tracking adds an invisible image at the end of the email which can track email opens. If the email recipient has images enabled on their email client, a request to SendGrids server for the invisible image is executed and an open event is logged. These events are logged in the Statistics portal, Email Activity interface, and are reported by the Event Webhook. + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### PATCH /tracking_settings/open + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/open", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get Open Tracking Settings + +**This endpoint allows you to retrieve your current settings for open tracking.** + +Open Tracking adds an invisible image at the end of the email which can track email opens. If the email recipient has images enabled on their email client, a request to SendGrids server for the invisible image is executed and an open event is logged. 
These events are logged in the Statistics portal, Email Activity interface, and are reported by the Event Webhook. + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### GET /tracking_settings/open + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/open", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Subscription Tracking Settings + +**This endpoint allows you to update your current settings for subscription tracking.** + +Subscription tracking adds links to the bottom of your emails that allows your recipients to subscribe to, or unsubscribe from, your emails. + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### PATCH /tracking_settings/subscription + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/subscription", host) +request.Method = "PATCH" +request.Body = []byte(` { + "enabled": true, + "html_content": "html content", + "landing": "landing page html", + "plain_content": "text content", + "replace": "replacement tag", + "url": "url" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Subscription Tracking Settings + +**This endpoint allows you to retrieve your current settings for subscription tracking.** + +Subscription tracking adds links to the bottom of your emails that allows your recipients to subscribe to, or unsubscribe from, your emails. + +You can track a variety of the actions your recipients may take when interacting with your emails including opening your emails, clicking on links in your emails, and subscribing to (or unsubscribing from) your emails. + +For more information about tracking, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/tracking.html). + +### GET /tracking_settings/subscription + +```go +request := sendgrid.GetRequest(apiKey, "/v3/tracking_settings/subscription", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# USER + +## Get a user's account information. + +**This endpoint allows you to retrieve your user account details.** + +Your user's account information includes the user's account type and reputation. + +Keeping your user profile up to date is important. This will help SendGrid to verify who you are as well as contact you should we need to. 
+ +For more information about your user profile: + +* [SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### GET /user/account + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/account", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve your credit balance + +**This endpoint allows you to retrieve the current credit balance for your account.** + +Your monthly credit allotment limits the number of emails you may send before incurring overage charges. For more information about credits and billing, please visit our [Classroom](https://sendgrid.com/docs/Classroom/Basics/Billing/billing_info_and_faqs.html). + +### GET /user/credits + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/credits", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update your account email address + +**This endpoint allows you to update the email address currently on file for your account.** + +Keeping your user profile up to date is important. This will help SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### PUT /user/email + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/email", host) +request.Method = "PUT" +request.Body = []byte(` { + "email": "example@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve your account email address + +**This endpoint allows you to retrieve the email address currently on file for your account.** + +Keeping your user profile up to date is important. This will help SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### GET /user/email + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/email", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update your password + +**This endpoint allows you to update your password.** + +Keeping your user profile up to date is important. This will help SendGrid to verify who you are as well as contact you should we need to. 
+ +For more information about your user profile: + +* [SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### PUT /user/password + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/password", host) +request.Method = "PUT" +request.Body = []byte(` { + "new_password": "new_password", + "old_password": "old_password" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a user's profile + +**This endpoint allows you to update your current profile details.** + +Keeping your user profile up to date is important. This will help SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +It should be noted that any one or more of the parameters can be updated via the PATCH /user/profile endpoint. The only requirement is that you include at least one when you PATCH. + +### PATCH /user/profile + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/profile", host) +request.Method = "PATCH" +request.Body = []byte(` { + "city": "Orange", + "first_name": "Example", + "last_name": "User" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get a user's profile + +Keeping your user profile up to date is important. This will help SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### GET /user/profile + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/profile", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Cancel or pause a scheduled send + +**This endpoint allows you to cancel or pause an email that has been scheduled to be sent.** + +If the maximum number of cancellations/pauses are added, HTTP 400 will +be returned. + +The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header.Scheduled sends cancelled less than 10 minutes before the scheduled time are not guaranteed to be cancelled. + +### POST /user/scheduled_sends + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends", host) +request.Method = "POST" +request.Body = []byte(` { + "batch_id": "YOUR_BATCH_ID", + "status": "pause" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all scheduled sends + +**This endpoint allows you to retrieve all cancel/paused scheduled send information.** + +The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header.Scheduled sends cancelled less than 10 minutes before the scheduled time are not guaranteed to be cancelled. 
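+
+`sendgrid.API` generally returns a non-nil `err` only for transport-level failures; API-level errors (such as the HTTP 400 mentioned above) are reported through `response.StatusCode`. Given a request built as in the example below, here is a sketch of checking the status and decoding the list response (assuming `encoding/json` and `net/http` are imported, and assuming the response is a JSON array of objects carrying the same `batch_id` and `status` fields used in the requests above; verify the exact shape against the API reference):
+
+```go
+response, err := sendgrid.API(request)
+if err != nil {
+  log.Println(err)
+} else if response.StatusCode >= http.StatusBadRequest {
+  log.Println("request failed:", response.StatusCode, response.Body)
+} else {
+  var sends []struct {
+    BatchID string `json:"batch_id"`
+    Status  string `json:"status"`
+  }
+  if err := json.Unmarshal([]byte(response.Body), &sends); err != nil {
+    log.Println(err)
+  } else {
+    for _, s := range sends {
+      fmt.Println(s.BatchID, s.Status)
+    }
+  }
+}
+```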
+
+### GET /user/scheduled_sends

```go
request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
  log.Println(err)
} else {
  fmt.Println(response.StatusCode)
  fmt.Println(response.Body)
  fmt.Println(response.Headers)
}
```

## Update user scheduled send information

**This endpoint allows you to update the status of a scheduled send for the given `batch_id`.**

The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header. Scheduled sends cancelled less than 10 minutes before the scheduled time are not guaranteed to be cancelled.

### PATCH /user/scheduled_sends/{batch_id}

```go
request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends/{batch_id}", host)
request.Method = "PATCH"
request.Body = []byte(` {
  "status": "pause"
}`)
response, err := sendgrid.API(request)
if err != nil {
  log.Println(err)
} else {
  fmt.Println(response.StatusCode)
  fmt.Println(response.Body)
  fmt.Println(response.Headers)
}
```

## Retrieve scheduled send

**This endpoint allows you to retrieve the cancel/paused scheduled send information for a specific `batch_id`.**

The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header. Scheduled sends cancelled less than 10 minutes before the scheduled time are not guaranteed to be cancelled.

### GET /user/scheduled_sends/{batch_id}

```go
request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends/{batch_id}", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
  log.Println(err)
} else {
  fmt.Println(response.StatusCode)
  fmt.Println(response.Body)
  fmt.Println(response.Headers)
}
```

## Delete a cancellation or pause of a scheduled send

**This endpoint allows you to delete the cancellation/pause of a scheduled send.**

The Cancel Scheduled Sends feature allows the customer to cancel a scheduled send based on a Batch ID included in the SMTPAPI header. Scheduled sends cancelled less than 10 minutes before the scheduled time are not guaranteed to be cancelled.

### DELETE /user/scheduled_sends/{batch_id}

```go
request := sendgrid.GetRequest(apiKey, "/v3/user/scheduled_sends/{batch_id}", host)
request.Method = "DELETE"
response, err := sendgrid.API(request)
if err != nil {
  log.Println(err)
} else {
  fmt.Println(response.StatusCode)
  fmt.Println(response.Body)
  fmt.Println(response.Headers)
}
```

## Update Enforced TLS settings

**This endpoint allows you to update your current Enforced TLS settings.**

The Enforced TLS settings specify whether or not the recipient is required to support TLS or have a valid certificate. See the [SMTP Ports User Guide](https://sendgrid.com/docs/Classroom/Basics/Email_Infrastructure/smtp_ports.html) for more information on opportunistic TLS.

**Note:** If either setting is enabled and the recipient does not support TLS or have a valid certificate, we drop the message and send a block event with TLS required but not supported as the description.
+ +### PATCH /user/settings/enforced_tls + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/settings/enforced_tls", host) +request.Method = "PATCH" +request.Body = []byte(` { + "require_tls": true, + "require_valid_cert": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve current Enforced TLS settings. + +**This endpoint allows you to retrieve your current Enforced TLS settings.** + +The Enforced TLS settings specify whether or not the recipient is required to support TLS or have a valid certificate. See the [SMTP Ports User Guide](https://sendgrid.com/docs/Classroom/Basics/Email_Infrastructure/smtp_ports.html) for more information on opportunistic TLS. + +**Note:** If either setting is enabled and the recipient does not support TLS or have a valid certificate, we drop the message and send a block event with TLS required but not supported as the description. + +### GET /user/settings/enforced_tls + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/settings/enforced_tls", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update your username + +**This endpoint allows you to update the username for your account.** + +Keeping your user profile up to date is important. This will help SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### PUT /user/username + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/username", host) +request.Method = "PUT" +request.Body = []byte(` { + "username": "test_username" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve your username + +**This endpoint allows you to retrieve your current account username.** + +Keeping your user profile up to date is important. This will help SendGrid to verify who you are as well as contact you should we need to. + +For more information about your user profile: + +* [SendGrid Account Settings](https://sendgrid.com/docs/User_Guide/Settings/account.html) + +### GET /user/username + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/username", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update Event Notification Settings + +**This endpoint allows you to update your current event webhook settings.** + +If an event type is marked as `true`, then the event webhook will include information about that event. + +SendGrids Event Webhook will notify a URL of your choice via HTTP POST with information about events that occur as SendGrid processes your email. + +Common uses of this data are to remove unsubscribes, react to spam reports, determine unengaged recipients, identify bounced email addresses, or create advanced analytics of your email program. 
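+
+The `url` you configure below must accept SendGrid's HTTP POST notifications. A minimal receiver sketch (an illustration only, assuming `net/http` and `encoding/json` are imported and that the webhook delivers a JSON array of event objects that each include an `event` and `email` field; consult the Event Webhook reference for the full payload):
+
+```go
+http.HandleFunc("/sendgrid/events", func(w http.ResponseWriter, r *http.Request) {
+  var events []map[string]interface{}
+  if err := json.NewDecoder(r.Body).Decode(&events); err != nil {
+    http.Error(w, "bad payload", http.StatusBadRequest)
+    return
+  }
+  for _, e := range events {
+    log.Println(e["event"], e["email"])
+  }
+  w.WriteHeader(http.StatusOK)
+})
+log.Fatal(http.ListenAndServe(":8000", nil))
+```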
+ +### PATCH /user/webhooks/event/settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/event/settings", host) +request.Method = "PATCH" +request.Body = []byte(` { + "bounce": true, + "click": true, + "deferred": true, + "delivered": true, + "dropped": true, + "enabled": true, + "group_resubscribe": true, + "group_unsubscribe": true, + "open": true, + "processed": true, + "spam_report": true, + "unsubscribe": true, + "url": "url" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Event Webhook settings + +**This endpoint allows you to retrieve your current event webhook settings.** + +If an event type is marked as `true`, then the event webhook will include information about that event. + +SendGrids Event Webhook will notify a URL of your choice via HTTP POST with information about events that occur as SendGrid processes your email. + +Common uses of this data are to remove unsubscribes, react to spam reports, determine unengaged recipients, identify bounced email addresses, or create advanced analytics of your email program. + +### GET /user/webhooks/event/settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/event/settings", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Test Event Notification Settings + +**This endpoint allows you to test your event webhook by sending a fake event notification post to the provided URL.** + +SendGrids Event Webhook will notify a URL of your choice via HTTP POST with information about events that occur as SendGrid processes your email. + +Common uses of this data are to remove unsubscribes, react to spam reports, determine unengaged recipients, identify bounced email addresses, or create advanced analytics of your email program. + +### POST /user/webhooks/event/test + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/event/test", host) +request.Method = "POST" +request.Body = []byte(` { + "url": "url" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a parse setting + +**This endpoint allows you to create a new inbound parse setting.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). 
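+
+The `url` you register below must accept SendGrid's POST of the parsed message. When `send_raw` is false, the content arrives as multipart form data; a minimal receiver sketch (an illustration only, assuming `net/http` is imported and fields such as `from`, `subject`, and `text` as described in the Parse Webhook reference):
+
+```go
+http.HandleFunc("/sendgrid/parse", func(w http.ResponseWriter, r *http.Request) {
+  // Keep up to 32 MB of the multipart form in memory; larger parts go to temp files.
+  if err := r.ParseMultipartForm(32 << 20); err != nil {
+    http.Error(w, "bad payload", http.StatusBadRequest)
+    return
+  }
+  log.Println("from:", r.FormValue("from"), "subject:", r.FormValue("subject"))
+  log.Println("body:", r.FormValue("text"))
+  w.WriteHeader(http.StatusOK)
+})
+```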
+ +### POST /user/webhooks/parse/settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings", host) +request.Method = "POST" +request.Body = []byte(` { + "hostname": "myhostname.com", + "send_raw": false, + "spam_check": true, + "url": "http://email.myhosthame.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all parse settings + +**This endpoint allows you to retrieve all of your current inbound parse settings.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). + +### GET /user/webhooks/parse/settings + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a parse setting + +**This endpoint allows you to update a specific inbound parse setting.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). + +### PATCH /user/webhooks/parse/settings/{hostname} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings/{hostname}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "send_raw": true, + "spam_check": false, + "url": "http://newdomain.com/parse" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a specific parse setting + +**This endpoint allows you to retrieve a specific inbound parse setting.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). + +### GET /user/webhooks/parse/settings/{hostname} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings/{hostname}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a parse setting + +**This endpoint allows you to delete a specific inbound parse setting.** + +The inbound parse webhook allows you to have incoming emails parsed, extracting some or all of the content, and then have that content POSTed by SendGrid to a URL of your choosing. For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Webhooks/parse.html). 
+ +### DELETE /user/webhooks/parse/settings/{hostname} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/settings/{hostname}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieves Inbound Parse Webhook statistics. + +**This endpoint allows you to retrieve the statistics for your Parse Webhook usage.** + +SendGrid's Inbound Parse Webhook allows you to parse the contents and attachments of incoming emails. The Parse API can then POST the parsed emails to a URL that you specify. The Inbound Parse Webhook cannot parse messages greater than 20MB in size, including all attachments. + +There are a number of pre-made integrations for the SendGrid Parse Webhook which make processing events easy. You can find these integrations in the [Library Index](https://sendgrid.com/docs/Integrate/libraries.html#-Webhook-Libraries). + +### GET /user/webhooks/parse/stats + +```go +request := sendgrid.GetRequest(apiKey, "/v3/user/webhooks/parse/stats", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["aggregated_by"] = "day" +queryParams["limit"] = "test_string" +queryParams["start_date"] = "2016-01-01" +queryParams["end_date"] = "2016-04-01" +queryParams["offset"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + +# WHITELABEL + +## Create a domain whitelabel. + +**This endpoint allows you to create a whitelabel for one of your domains.** + +If you are creating a domain whitelabel that you would like a subuser to use, you have two options: +1. Use the "username" parameter. This allows you to create a whitelabel on behalf of your subuser. This means the subuser is able to see and modify the created whitelabel. +2. Use the Association workflow (see Associate Domain section). This allows you to assign a whitelabel created by the parent to a subuser. This means the subuser will default to the assigned whitelabel, but will not be able to see or modify that whitelabel. However, if the subuser creates their own whitelabel it will overwrite the assigned whitelabel. + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. 
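+The DNS records that SendGrid generates are returned in the JSON body of the create call shown below. A minimal sketch of inspecting that body, decoding it into a generic map rather than assuming a fixed response struct (the records are expected under a `dns` key; treat the exact shape as subject to the API reference). Standard library `encoding/json`, `fmt`, and `log` imports are assumed:
+
+```go
+// response comes from sendgrid.API(request) as in the POST example below.
+var created map[string]interface{}
+if err := json.Unmarshal([]byte(response.Body), &created); err != nil {
+	log.Println(err)
+} else {
+	// Print the generated records so they can be copied to your DNS provider.
+	fmt.Printf("%+v\n", created["dns"])
+}
+```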
+ +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +### POST /whitelabel/domains + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains", host) +request.Method = "POST" +request.Body = []byte(` { + "automatic_security": false, + "custom_spf": true, + "default": true, + "domain": "example.com", + "ips": [ + "192.168.1.1", + "192.168.1.2" + ], + "subdomain": "news", + "username": "john@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## List all domain whitelabels. + +**This endpoint allows you to retrieve a list of all domain whitelabels you have created.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + + +### GET /whitelabel/domains + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["username"] = "test_string" +queryParams["domain"] = "test_string" +queryParams["exclude_subusers"] = "true" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Get the default domain whitelabel. + +**This endpoint allows you to retrieve the default whitelabel for a domain.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| domain | string |The domain to find a default domain whitelabel for. | + +### GET /whitelabel/domains/default + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/default", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## List the domain whitelabel associated with the given user. 
+ +**This endpoint allows you to retrieve all of the whitelabels that have been assigned to a specific subuser.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +Domain whitelabels can be associated with (i.e. assigned to) subusers from a parent account. This functionality allows subusers to send mail using their parent's whitelabels. To associate a whitelabel with a subuser, the parent account must first create the whitelabel and validate it. The parent may then associate the whitelabel via the subuser management tools. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| username | string | Username of the subuser to find associated whitelabels for. | + +### GET /whitelabel/domains/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/subuser", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Disassociate a domain whitelabel from a given user. + +**This endpoint allows you to disassociate a specific whitelabel from a subuser.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +Domain whitelabels can be associated with (i.e. assigned to) subusers from a parent account. This functionality allows subusers to send mail using their parent's whitelabels. To associate a whitelabel with a subuser, the parent account must first create the whitelabel and validate it. The parent may then associate the whitelabel via the subuser management tools. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +## URI Parameters +| URI Parameter | Type | Required? | Description | +|---|---|---|---| +| username | string | required | Username for the subuser to find associated whitelabels for. | + +### DELETE /whitelabel/domains/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/subuser", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a domain whitelabel. 
+ +**This endpoint allows you to update the settings for a domain whitelabel.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +### PATCH /whitelabel/domains/{domain_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "custom_spf": true, + "default": false +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a domain whitelabel. + +**This endpoint allows you to retrieve a specific domain whitelabel.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + + +### GET /whitelabel/domains/{domain_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a domain whitelabel. + +**This endpoint allows you to delete a domain whitelabel.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +### DELETE /whitelabel/domains/{domain_id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Associate a domain whitelabel with a given user. 
+ +**This endpoint allows you to associate a specific domain whitelabel with a subuser.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +Domain whitelabels can be associated with (i.e. assigned to) subusers from a parent account. This functionality allows subusers to send mail using their parent's whitelabels. To associate a whitelabel with a subuser, the parent account must first create the whitelabel and validate it. The parent may then associate the whitelabel via the subuser management tools. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| domain_id | integer | ID of the domain whitelabel to associate with the subuser. | + +### POST /whitelabel/domains/{domain_id}/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}/subuser", host) +request.Method = "POST" +request.Body = []byte(` { + "username": "jane@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Add an IP to a domain whitelabel. + +**This endpoint allows you to add an IP address to a domain whitelabel.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| id | integer | ID of the domain to which you are adding an IP | + +### POST /whitelabel/domains/{id}/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{id}/ips", host) +request.Method = "POST" +request.Body = []byte(` { + "ip": "192.168.0.1" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Remove an IP from a domain whitelabel. + +**This endpoint allows you to remove a domain's IP address from that domain's whitelabel.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. 
You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| id | integer | ID of the domain whitelabel to delete the IP from. | +| ip | string | IP to remove from the domain whitelabel. | + +### DELETE /whitelabel/domains/{id}/ips/{ip} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{id}/ips/{ip}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Validate a domain whitelabel. + +**This endpoint allows you to validate a domain whitelabel. If it fails, it will return an error message describing why the whitelabel could not be validated.** + +A domain whitelabel allows you to remove the via or sent on behalf of message that your recipients see when they read your emails. Whitelabeling a domain allows you to replace sendgrid.net with your personal sending domain. You will be required to create a subdomain so that SendGrid can generate the DNS records which you must give to your host provider. If you choose to use Automated Security, SendGrid will provide you with 3 CNAME records. If you turn Automated Security off, you will be given 2 TXT records and 1 MX record. + +For more information on whitelabeling, please see our [User Guide](https://sendgrid.com/docs/User_Guide/Settings/Whitelabel/index.html) + +## URI Parameters +| URI Parameter | Type | Description | +|---|---|---| +| id | integer |ID of the domain whitelabel to validate. | + +### POST /whitelabel/domains/{id}/validate + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{id}/validate", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create an IP whitelabel + +**This endpoint allows you to create an IP whitelabel.** + +When creating an IP whitelable, you should use the same subdomain that you used when you created a domain whitelabel. + +A IP whitelabel consists of a subdomain and domain that will be used to generate a reverse DNS record for a given IP. Once SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/ips.html). 
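+As in the other examples in this document, `sendgrid.API` returns a non-nil `err` only for transport-level failures; a rejected request (for example, an invalid IP or subdomain) still comes back as a normal response. A small sketch of treating non-2xx statuses as errors, using the same `request`/`response` conventions as the snippets below:
+
+```go
+response, err := sendgrid.API(request)
+if err != nil {
+	log.Println(err)
+} else if response.StatusCode >= 400 {
+	// The API reported a problem; the body typically contains an "errors" array explaining why.
+	log.Printf("request failed: %d %s", response.StatusCode, response.Body)
+} else {
+	fmt.Println(response.StatusCode)
+	fmt.Println(response.Body)
+}
+```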
+ +### POST /whitelabel/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips", host) +request.Method = "POST" +request.Body = []byte(` { + "domain": "example.com", + "ip": "192.168.1.1", + "subdomain": "email" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all IP whitelabels + +**This endpoint allows you to retrieve all of the IP whitelabels that have been created by this account.** + +You may include a search key by using the "ip" parameter. This enables you to perform a prefix search for a given IP segment (e.g. "192."). + +A IP whitelabel consists of a subdomain and domain that will be used to generate a reverse DNS record for a given IP. Once SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/ips.html). + +### GET /whitelabel/ips + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["ip"] = "test_string" +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve an IP whitelabel + +**This endpoint allows you to retrieve an IP whitelabel.** + +A IP whitelabel consists of a subdomain and domain that will be used to generate a reverse DNS record for a given IP. Once SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/ips.html). + +### GET /whitelabel/ips/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete an IP whitelabel + +**This endpoint allows you to delete an IP whitelabel.** + +A IP whitelabel consists of a subdomain and domain that will be used to generate a reverse DNS record for a given IP. Once SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/ips.html). + +### DELETE /whitelabel/ips/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Validate an IP whitelabel + +**This endpoint allows you to validate an IP whitelabel.** + +A IP whitelabel consists of a subdomain and domain that will be used to generate a reverse DNS record for a given IP. 
Once SendGrid has verified that the appropriate A record for the IP has been created, the appropriate reverse DNS record for the IP is generated. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/ips.html). + +### POST /whitelabel/ips/{id}/validate + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}/validate", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Create a Link Whitelabel + +**This endpoint allows you to create a new link whitelabel.** + +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). + +### POST /whitelabel/links + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links", host) +request.Method = "POST" +request.Body = []byte(` { + "default": true, + "domain": "example.com", + "subdomain": "mail" +}`) +queryParams := make(map[string]string) +queryParams["limit"] = "1" +queryParams["offset"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve all link whitelabels + +**This endpoint allows you to retrieve all link whitelabels.** + +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). + +### GET /whitelabel/links + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["limit"] = "1" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Default Link Whitelabel + +**This endpoint allows you to retrieve the default link whitelabel.** + +Default link whitelabel is the actual link whitelabel to be used when sending messages. If there are multiple link whitelabels, the default is determined by the following order: +
+* Validated link whitelabels marked as "default"
+* Legacy link whitelabels (migrated from the whitelabel wizard)
+* Default SendGrid link whitelabel (i.e. 100.ct.sendgrid.net)
+ +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). + +### GET /whitelabel/links/default + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/default", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["domain"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve Associated Link Whitelabel + +**This endpoint allows you to retrieve the associated link whitelabel for a subuser.** + +Link whitelables can be associated with subusers from the parent account. This functionality allows +subusers to send mail using their parent's link whitelabels. To associate a link whitelabel, the parent account +must first create a whitelabel and validate it. The parent may then associate that whitelabel with a subuser via the API or the Subuser Management page in the user interface. + +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). + +### GET /whitelabel/links/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/subuser", host) +request.Method = "GET" +queryParams := make(map[string]string) +queryParams["username"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Disassociate a Link Whitelabel + +**This endpoint allows you to disassociate a link whitelabel from a subuser.** + +Link whitelables can be associated with subusers from the parent account. This functionality allows +subusers to send mail using their parent's link whitelabels. To associate a link whitelabel, the parent account +must first create a whitelabel and validate it. The parent may then associate that whitelabel with a subuser via the API or the Subuser Management page in the user interface. + +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). + +### DELETE /whitelabel/links/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/subuser", host) +request.Method = "DELETE" +queryParams := make(map[string]string) +queryParams["username"] = "test_string" +request.QueryParams = queryParams +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Update a Link Whitelabel + +**This endpoint allows you to update a specific link whitelabel. You can use this endpoint to change a link whitelabel's default status.** + +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. 
+ +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). + +### PATCH /whitelabel/links/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}", host) +request.Method = "PATCH" +request.Body = []byte(` { + "default": true +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Retrieve a Link Whitelabel + +**This endpoint allows you to retrieve a specific link whitelabel.** + +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). + +### GET /whitelabel/links/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}", host) +request.Method = "GET" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Delete a Link Whitelabel + +**This endpoint allows you to delete a link whitelabel.** + +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). + +### DELETE /whitelabel/links/{id} + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}", host) +request.Method = "DELETE" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Validate a Link Whitelabel + +**This endpoint allows you to validate a link whitelabel.** + +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). + +### POST /whitelabel/links/{id}/validate + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}/validate", host) +request.Method = "POST" +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + +## Associate a Link Whitelabel + +**This endpoint allows you to associate a link whitelabel with a subuser account.** + +Link whitelables can be associated with subusers from the parent account. This functionality allows +subusers to send mail using their parent's link whitelabels. To associate a link whitelabel, the parent account +must first create a whitelabel and validate it. The parent may then associate that whitelabel with a subuser via the API or the Subuser Management page in the user interface. + +Email link whitelabels allow all of the click-tracked links you send in your emails to include the URL of your domain instead of sendgrid.net. + +For more information, please see our [User Guide](https://sendgrid.com/docs/API_Reference/Web_API_v3/Whitelabel/links.html). 
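+Putting that workflow together, a parent account would typically chain three of the calls documented in this section: create the link whitelabel, validate it, then associate it with the subuser. A condensed sketch (the id and username are placeholders, and error/status handling is elided for brevity):
+
+```go
+// 1. Create the link whitelabel (POST /whitelabel/links).
+create := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links", host)
+create.Method = "POST"
+create.Body = []byte(`{"default": false, "domain": "example.com", "subdomain": "mail"}`)
+createResp, err := sendgrid.API(create)
+if err != nil {
+	log.Fatal(err)
+}
+fmt.Println(createResp.Body) // the response body contains the new whitelabel's id
+
+// 2. Validate it (POST /whitelabel/links/{id}/validate), using the id returned above.
+linkID := "42" // placeholder; parse the real id out of createResp.Body
+validate := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/"+linkID+"/validate", host)
+validate.Method = "POST"
+sendgrid.API(validate)
+
+// 3. Associate it with the subuser (POST /whitelabel/links/{link_id}/subuser).
+associate := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/"+linkID+"/subuser", host)
+associate.Method = "POST"
+associate.Body = []byte(`{"username": "jane@example.com"}`)
+sendgrid.API(associate)
+```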
+ +### POST /whitelabel/links/{link_id}/subuser + +```go +request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{link_id}/subuser", host) +request.Method = "POST" +request.Body = []byte(` { + "username": "jane@example.com" +}`) +response, err := sendgrid.API(request) +if err != nil { + log.Println(err) +} else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) +} +``` + + diff --git a/vendor/github.com/sendgrid/sendgrid-go/USE_CASES.md b/vendor/github.com/sendgrid/sendgrid-go/USE_CASES.md new file mode 100644 index 0000000..3173215 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/USE_CASES.md @@ -0,0 +1,1587 @@ +This documentation provides examples for specific use cases. Please [open an issue](https://github.com/sendgrid/sendgrid-go/issues) or make a pull request for any use cases you would like us to document here. Thank you! + +# Table of Contents + +* [Transactional Templates](#transactional-templates) +* [Legacy Templates](#legacy-templates) +* [CustomArgs](#customargs) +* [Personalizations](#personalizations) +* [Substitutions](#substitutions) +* [Sections](#sections) +* [Attachments](#attachments) +* [How to View Email Statistics](#email-stats) +* [How to Setup a Domain Whitelabel](#whitelabel-domain) + + +# Transactional Templates +For this example, we assume you have created a [dynamic transactional template](https://sendgrid.com/docs/User_Guide/Transactional_Templates/how_to_send_an_email_with_transactional_templates.html). Following is the dynamic template data we used for testing. + +Template ID (replace with your own): + +```text +d-c6dcf1f72bdd4beeb15a9aa6c72fcd2c +``` + +[Template Body](https://github.com/sendgrid/email-templates/blob/master/dynamic-templates/receipt/receipt.html) + +[Template Data](https://github.com/sendgrid/email-templates/blob/master/dynamic-templates/receipt/receipt_data.json) + +## With Mail Helper Class + +```go +package main + +import ( + "fmt" + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" + "os" +) + +func main() { + m := mail.NewV3Mail() + + address := "test@example.com" + name := "Example User" + e := mail.NewEmail(name, address) + m.SetFrom(e) + + m.SetTemplateID("d-c6dcf1f72bdd4beeb15a9aa6c72fcd2c") + + p := mail.NewPersonalization() + tos := []*mail.Email{ + mail.NewEmail("Example User", "test1@example.com"), + } + p.AddTos(tos...) 
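+	// The keys passed to SetDynamicTemplateData below must match the Handlebars
+	// variables used in the dynamic template referenced by SetTemplateID above;
+	// the values here mirror the sample receipt template's data file linked earlier.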
+ + p.SetDynamicTemplateData("receipt", "true") + p.SetDynamicTemplateData("total", "$ 239.85") + + items := []struct { + text string + image string + price string + }{ + {"New Line Sneakers", "https://marketing-image-production.s3.amazonaws.com/uploads/8dda1131320a6d978b515cc04ed479df259a458d5d45d58b6b381cae0bf9588113e80ef912f69e8c4cc1ef1a0297e8eefdb7b270064cc046b79a44e21b811802.png", "$ 79.95"}, + {"Old Line Sneakers", "https://marketing-image-production.s3.amazonaws.com/uploads/3629f54390ead663d4eb7c53702e492de63299d7c5f7239efdc693b09b9b28c82c924225dcd8dcb65732d5ca7b7b753c5f17e056405bbd4596e4e63a96ae5018.png", "$ 79.95"}, + {"Blue Line Sneakers", "https://marketing-image-production.s3.amazonaws.com/uploads/00731ed18eff0ad5da890d876c456c3124a4e44cb48196533e9b95fb2b959b7194c2dc7637b788341d1ff4f88d1dc88e23f7e3704726d313c57f350911dd2bd0.png", "$ 79.95"}, + } + + var itemList []map[string]string + var item map[string]string + for _, v := range items { + item = make(map[string]string) + item["text"] = v.text + item["image"] = v.image + item["price"] = v.price + itemList = append(itemList, item) + } + p.SetDynamicTemplateData("items", itemList) + + p.SetDynamicTemplateData("name", "Sample Name") + p.SetDynamicTemplateData("address01", "1234 Fake St.") + p.SetDynamicTemplateData("address02", "Apt. 123") + p.SetDynamicTemplateData("city", "Place") + p.SetDynamicTemplateData("state", "CO") + p.SetDynamicTemplateData("zip", "80202") + + m.AddPersonalizations(p) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + var Body = mail.GetRequestBody(m) + request.Body = Body + response, err := sendgrid.API(request) + if err != nil { + fmt.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +## Without Mail Helper Class + +```go +package main + +import ( + "fmt" + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" + "os" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(` { + "from": { + "email": "test@example.com" + }, + "personalizations": [ + { + "to": [ + { + "email": "test@example.com" + } + ], + "dynamic_template_data":{ + "total":"$ 239.85", + "items":[ + { + "text":"New Line Sneakers", + "image":"https://marketing-image-production.s3.amazonaws.com/uploads/8dda1131320a6d978b515cc04ed479df259a458d5d45d58b6b381cae0bf9588113e80ef912f69e8c4cc1ef1a0297e8eefdb7b270064cc046b79a44e21b811802.png", + "price":"$ 79.95" + }, + { + "text":"Old Line Sneakers", + "image":"https://marketing-image-production.s3.amazonaws.com/uploads/3629f54390ead663d4eb7c53702e492de63299d7c5f7239efdc693b09b9b28c82c924225dcd8dcb65732d5ca7b7b753c5f17e056405bbd4596e4e63a96ae5018.png", + "price":"$ 79.95" + }, + { + "text":"Blue Line Sneakers", + "image":"https://marketing-image-production.s3.amazonaws.com/uploads/00731ed18eff0ad5da890d876c456c3124a4e44cb48196533e9b95fb2b959b7194c2dc7637b788341d1ff4f88d1dc88e23f7e3704726d313c57f350911dd2bd0.png", + "price":"$ 79.95" + } + ], + "receipt":true, + "name":"Sample Name", + "address01":"1234 Fake St.", + "address02":"Apt. 
123", + "city":"Place", + "state":"CO", + "zip":"80202" + } + } + ], + "template_id":"d-c6dcf1f72bdd4beeb15a9aa6c72fcd2c" + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + +# Legacy Templates + +For this example, we assume you have created a [transactional template](https://sendgrid.com/docs/User_Guide/Transactional_Templates/index.html). Following is the template content we used for testing. + +Template ID (replace with your own): + +```text +13b8f94f-bcae-4ec6-b752-70d6cb59f932 +``` + +Email Subject: + +```text +<%subject%> +``` + +Template Body: + +```html + + + + + +Hello -name-, +
+<br /><br/>
+I'm glad you are trying out the template feature!
+<br /><br/>
+<%body%>
+<br /><br/>
+I hope you are having a great day in -city- :)
+<br /><br/>
+ + +``` + +## With Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + from := mail.NewEmail("Example User", "test@example.com") + subject := "I'm replacing the subject tag" + to := mail.NewEmail("Example User", "test@example.com") + content := mail.NewContent("text/html", "I'm replacing the body tag") + m := mail.NewV3MailInit(from, subject, to, content) + m.Personalizations[0].SetSubstitution("-name-", "Example User") + m.Personalizations[0].SetSubstitution("-city-", "Denver") + m.SetTemplateID("13b8f94f-bcae-4ec6-b752-70d6cb59f932") + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +## Without Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(` { + "personalizations": [ + { + "to": [ + { + "email": "test@example.com" + } + ], + "subject": "I'm replacing the subject tag", + "substitutions": { + "-name-": "Example User", + "-city-": "Denver" + }, + } + ], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "I'm replacing the body tag" + } + ], + "template_id": "13b8f94f-bcae-4ec6-b752-70d6cb59f932" +}`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + +# CustomArgs + +## With Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + from := mail.NewEmail("Example User", "test@example.com") + subject := "CustomArgs can be fun" + to := mail.NewEmail("Example User", "test@example.com") + content := mail.NewContent("text/html", "\n\n\t\n\n\nHello -name-,\n

\nI'm glad you are trying out the CustomArgs feature!\n\nI hope you are having a great day in -city- :)\n
\n\n") + m := mail.NewV3MailInit(from, subject, to, content) + m.Personalizations[0].SetSubstitution("-name-", "Example User") + m.Personalizations[0].SetSubstitution("-city-", "Denver") + m.Personalizations[0].SetCustomArg("user_id", "343") + m.Personalizations[0].SetCustomArg("batch_id", "3") + + m.SetCustomArg("campaign", "welcome") + m.SetCustomArg("weekday", "morning") + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +## Without Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(` { + "personalizations": [ + { + "to": [ + { + "email": "test@example.com" + } + ], + "subject": "CustomArgs can be fun", + "substitutions": { + "-name-": "Example User", + "-city-": "Denver" + }, + "custom_args": { + "user_id": "343", + "batch_id": "3" + } + } + ], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "\n\n\t\n\n\nHello -name-,\n

\nI'm glad you are trying out the CustomArgs feature!\n\nI hope you are having a great day in -city- :)\n
\n\n" + } + ], + "custom_args": { + "campaign": "welcome", + "weekday": "morning" + } +}`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + +# Personalizations + +## With Mail Helper Class + +### Sending a Single Email to a Single Recipient + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + // create new *SGMailV3 + m := mail.NewV3Mail() + + from := mail.NewEmail("test", "test@example.com") + content := mail.NewContent("text/html", "

%fname% : %CustomerID% - Personalizations are awesome!
") + + m.SetFrom(from) + m.AddContent(content) + + // create new *Personalization + personalization := mail.NewPersonalization() + + // populate `personalization` with data + to := mail.NewEmail("Example User", "test1@example.com") + + personalization.AddTos(to) + personalization.SetSubstitution("%fname%", "recipient") + personalization.SetSubstitution("%CustomerID%", "CUSTOMER ID GOES HERE") + personalization.Subject = "Having fun learning about personalizations?" + + // add `personalization` to `m` + m.AddPersonalizations(personalization) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending a Single Email to a Single Recipient with a CC + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + // create new *SGMailV3 + m := mail.NewV3Mail() + + from := mail.NewEmail("test", "test@example.com") + content := mail.NewContent("text/html", "

%fname% : %CustomerID% - Personalizations are awesome!
") + + m.SetFrom(from) + m.AddContent(content) + + // create new *Personalization + personalization := mail.NewPersonalization() + + // populate `personalization` with data + to := mail.NewEmail("Example User", "test1@example.com") + cc1 := mail.NewEmail("Example CC", "test2@example.com") + + personalization.AddTos(to) + personalization.AddCCs(cc1) + personalization.SetSubstitution("%fname%", "recipient") + personalization.SetSubstitution("%CustomerID%", "CUSTOMER ID GOES HERE") + personalization.Subject = "Having fun learning about personalizations?" + + // add `personalization` to `m` + m.AddPersonalizations(personalization) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending a Single Email to a Single Recipient with a CC and a BCC + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + // create new *SGMailV3 + m := mail.NewV3Mail() + + from := mail.NewEmail("test", "test@example.com") + content := mail.NewContent("text/html", "

%fname% : %CustomerID% - Personalizations are awesome!
") + + m.SetFrom(from) + m.AddContent(content) + + // create new *Personalization + personalization := mail.NewPersonalization() + + // populate `personalization` with data + to := mail.NewEmail("Example User", "test1@example.com") + cc1 := mail.NewEmail("Example CC", "test2@example.com") + bcc1 := mail.NewEmail("Example BCC", "test3@example.com") + + personalization.AddTos(to) + personalization.AddCCs(cc1) + personalization.AddBCCs(bcc1) + personalization.SetSubstitution("%fname%", "recipient") + personalization.SetSubstitution("%CustomerID%", "CUSTOMER ID GOES HERE") + personalization.Subject = "Having fun learning about personalizations?" + + // add `personalization` to `m` + m.AddPersonalizations(personalization) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending a Single Email to Multiple Recipients + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + // create new *SGMailV3 + m := mail.NewV3Mail() + + from := mail.NewEmail("test", "test@example.com") + content := mail.NewContent("text/html", "

%fname% : %CustomerID% - Personalizations are awesome!
") + + m.SetFrom(from) + m.AddContent(content) + + // create new *Personalization + personalization := mail.NewPersonalization() + + // populate `personalization` with data + to1 := mail.NewEmail("Example User 1", "test1@example.com") + to2 := mail.NewEmail("Example User 2", "test2@example.com") + to3 := mail.NewEmail("Example User 3", "test3@example.com") + + personalization.AddTos(to1, to2, to3) + personalization.SetSubstitution("%fname%", "recipient") + personalization.SetSubstitution("%CustomerID%", "CUSTOMER ID GOES HERE") + personalization.Subject = "Having fun learning about personalizations?" + + // add `personalization` to `m` + m.AddPersonalizations(personalization) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending a Single Email to a Single Recipient with Multiple CCs/BCCs + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + // create new *SGMailV3 + m := mail.NewV3Mail() + + from := mail.NewEmail("test", "test@example.com") + content := mail.NewContent("text/html", "

%fname% : %CustomerID% - Personalizations are awesome!
") + + m.SetFrom(from) + m.AddContent(content) + + // create new *Personalization + personalization := mail.NewPersonalization() + + // populate `personalization` with data + to := mail.NewEmail("Example User 1", "test1@example.com") + cc1 := mail.NewEmail("Example User 2", "test2@example.com") + cc2 := mail.NewEmail("Example User 3", "test3@example.com") + cc3 := mail.NewEmail("Example User 3", "test4@example.com") + + personalization.AddTos(to) + personalization.AddCCs(cc1, cc2, cc3) + personalization.SetSubstitution("%fname%", "recipient") + personalization.SetSubstitution("%CustomerID%", "CUSTOMER ID GOES HERE") + personalization.Subject = "Having fun learning about personalizations?" + + // add `personalization` to `m` + m.AddPersonalizations(personalization) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending Two Different Emails to Two Different Groups of Recipients + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + // create new *SGMailV3 + m := mail.NewV3Mail() + + from := mail.NewEmail("test", "test@example.com") + content := mail.NewContent("text/html", "

%fname% : %CustomerID% - Personalizations are awesome!
") + + m.SetFrom(from) + m.AddContent(content) + + // create new *Personalization(s) + personalization1 := mail.NewPersonalization() + personalization2 := mail.NewPersonalization() + + // populate `personalization1` with data + p1_to := mail.NewEmail("Example User 1", "test1@example.com") + p1_cc1 := mail.NewEmail("Example User 2", "test2@example.com") + p1_cc2 := mail.NewEmail("Example User 3", "test3@example.com") + p1_cc3 := mail.NewEmail("Example User 3", "test4@example.com") + + personalization1.AddTos(p1_to) + personalization1.AddCCs(p1_cc1, p1_cc2, p1_cc3) + personalization1.SetSubstitution("%fname%", "recipient") + personalization1.SetSubstitution("%CustomerID%", "CUSTOMER ID GOES HERE") + personalization1.Subject = "Having fun learning about personalizations?" + + // populate `personalization2` with data + p2_to := mail.NewEmail("Example User 1", "test1@example.com") + p2_cc1 := mail.NewEmail("Example User 2", "test2@example.com") + p2_cc2 := mail.NewEmail("Example User 3", "test3@example.com") + p2_cc3 := mail.NewEmail("Example User 3", "test4@example.com") + + personalization2.AddTos(p2_to) + personalization2.AddCCs(p2_cc1, p2_cc2, p2_cc3) + personalization2.SetSubstitution("%fname%", "recipient2") + personalization2.SetSubstitution("%CustomerID%", "55") + personalization2.Subject = "Personalizations are fun!" + + // add `personalization1` and `personalization2` to `m` + m.AddPersonalizations(personalization1, personalization2) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +## Without Mail Helper Class + +### Sending A Single Email to a Single Recipient + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(`{ + "personalizations": [{ + "to": [{ + "email": "test1@example.com" + }], + "substitutions": { + "%fname%": "recipient", + "%CustomerID%": "CUSTOMER ID GOES HERE" + }, + "subject": "YOUR SUBJECT LINE GOES HERE" + }], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "

%fname% : %CustomerID% - Personalizations are awesome!
" + } + ] + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending a Single Email to a Single Recipient With a CC + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(`{ + "personalizations": [{ + "to": [{ + "email": "recipient1@example.com" + }], + "cc": [{ + "email": "recipient2@example.com" + }], + "substitutions": { + "%fname%": "recipient", + "%CustomerID%": "CUSTOMER ID GOES HERE" + }, + "subject": "YOUR SUBJECT LINE GOES HERE" + }], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "

%fname% : %CustomerID% - Personalizations are awesome!
" + } + ] + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending a Single Email to a Single Recipient With a CC + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(`{ + "personalizations": [{ + "to": [{ + "email": "recipient1@example.com" + }], + "cc": [{ + "email": "recipient2@example.com" + }], + "substitutions": { + "%fname%": "recipient", + "%CustomerID%": "CUSTOMER ID GOES HERE" + }, + "subject": "YOUR SUBJECT LINE GOES HERE" + }], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "

%fname% : %CustomerID% - Personalizations are awesome!
" + } + ] + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending a Single Email to a Single Recipient With a CC and a BCC + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(`{ + "personalizations": [{ + "to": [{ + "email": "recipient1@example.com" + }], + "cc": [{ + "email": "recipient2@example.com" + }], + "bcc": [{ + "email": "recipient3@example.com" + }], + "substitutions": { + "%fname%": "recipient", + "%CustomerID%": "CUSTOMER ID GOES HERE" + } + }], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "
%fname% : %CustomerID% - Personalizations are awesome!
" + } + ] + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending the same Email to Multiple Recipients + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(`{ + "personalizations": [{ + "to": [{ + "email": "recipient1@example.com" + }, { + "email": "recipient2@example.com" + }, { + "email": "recipient3@example.com" + }], + "substitutions": { + "%fname%": "recipient", + "%CustomerID%": "CUSTOMER ID GOES HERE" + }, + "subject": "YOUR SUBJECT LINE GOES HERE" + }], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "
%fname% : %CustomerID% - Personalizations are awesome!
" + } + ] + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending a Single Email to a Single Recipient with Multiple CCs/BCCs + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(`{ + "personalizations": [{ + "to": [{ + "email": "recipient1@example.com" + }], + "cc": [{ + "email": "recipient2@example.com" + }, { + "email": "recipient3@example.com" + }, { + "email": "recipient4@example.com" + }], + "substitutions": { + "%fname%": "recipient", + "%CustomerID%": "CUSTOMER ID GOES HERE" + }, + "subject": "YOUR SUBJECT LINE GOES HERE" + }], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "
%fname% : %CustomerID% - Personalizations are awesome!
" + } + ] + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +### Sending Two Different Emails to Two Different Groups of Recipients + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(`{ + "personalizations": [{ + "to": [{ + "email": "recipient1@example.com" + }], + "cc": [{ + "email": "recipient2@example.com" + }, { + "email": "recipient3@example.com" + }, { + "email": "recipient4@example.com" + }], + "substitutions": { + "%fname%": "recipient", + "%CustomerID%": "CUSTOMER ID GOES HERE" + }, + "subject": "YOUR SUBJECT LINE GOES HERE" + }, { + "to": [{ + "email": "recipient5@example.com" + }], + "cc": [{ + "email": "recipient6@example.com" + }, { + "email": "recipient7@example.com" + }, { + "email": "recipient8@example.com" + }], + "substitutions": { + "%fname%": "recipient2", + "%CustomerID%": 55 + }, + "subject": "YOUR SUBJECT LINE GOES HERE" + }], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "
%fname% : %CustomerID% - Personalizations are awesome!
" + } + ] + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + +# Substitutions + +## With Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + from := mail.NewEmail("Example User", "test@example.com") + subject := "Substitutions can be fun" + to := mail.NewEmail("Example User", "test@example.com") + content := mail.NewContent("text/html", "\n\n\t\n\n\nHello -name-,\n
\nI'm glad you are trying out the Substitutions feature!\n\nI hope you are having a great day in -city- :)\n
\n\n") + m := mail.NewV3MailInit(from, subject, to, content) + m.Personalizations[0].SetSubstitution("-name-", "Example User") + m.Personalizations[0].SetSubstitution("-city-", "Denver") + m.Personalizations[0].SetSubstitution("-user_id-", "343") + m.Personalizations[0].SetCustomArg("user_id", "-user_id-") + m.Personalizations[0].SetCustomArg("city", "-city-") + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} + ``` + +## Without Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(` { + "personalizations": [ + { + "to": [ + { + "email": "test@example.com" + } + ], + "subject": "Substitutions can be fun", + "substitutions": { + "-name-": "Example User", + "-city-": "Denver", + "-user_id-": "343" + }, + "custom_args": { + "user_id": "-user_id-", + "city": "-city-" + } + } + ], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "\n\n\t\n\n\nHello -name-,\n
\nI'm glad you are trying out the Substitutions feature!\n\nI hope you are having a great day in -city- :)\n
\n\n" + } + ] + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} + ``` + + +# Sections + +## With Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + from := mail.NewEmail("Example User", "test@example.com") + subject := "Sections can be fun" + to := mail.NewEmail("Example User", "test@example.com") + content := mail.NewContent("text/html", "\n\n\t\n\n\n-wel-\n
\nI'm glad you are trying out the Sections feature!\n\n-gday-\n
\n\n") + m := mail.NewV3MailInit(from, subject, to, content) + m.Personalizations[0].SetSubstitution("-name-", "Example User") + m.Personalizations[0].SetSubstitution("-city-", "Denver") + m.Personalizations[0].SetSubstitution("-wel-", "-welcome-") + m.Personalizations[0].SetSubstitution("-gday-", "-great_day-") + + m.AddSection("-welcome-", "Hello -name-,") + m.AddSection("-great_day-", "I hope you are having a great day in -city- :)") + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} + ``` + +## Without Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(` { + "personalizations": [ + { + "to": [ + { + "email": "test@example.com" + } + ], + "subject": "Sections can be fun", + "substitutions": { + "-name-": "Example User", + "-city-": "Denver", + "-wel-": "-welcome-", + "-gday-": "-great_day-" + } + } + ], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "\n\n\t\n\n\n-wel-\n
\nI'm glad you are trying out the Sections feature!\n\n-gday-\n
\n\n" + } + ], + "sections": { + "section": { + "-welcome-": "Hello -name-,", + "-great_day-": "I hope you are having a great day in -city- :)" + } + } + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} + ``` + +# Attachments + +## With Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + "encoding/base64" + "io/ioutil" + "github.com/sendgrid/sendgrid-go" + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +func main() { + // create new *SGMailV3 + m := mail.NewV3Mail() + + from := mail.NewEmail("test", "test@example.com") + content := mail.NewContent("text/html", "
Sending different attachments.
") + to := mail.NewEmail("Example User", "test1@example.com") + + m.SetFrom(from) + m.AddContent(content) + + // create new *Personalization + personalization := mail.NewPersonalization() + personalization.AddTos(to) + personalization.Subject = "Attachments - Demystified!" + + // add `personalization` to `m` + m.AddPersonalizations(personalization) + + // read/attach .txt file + a_txt := mail.NewAttachment() + dat, err := ioutil.ReadFile("testing.txt") + if err != nil { + fmt.Println(err) + } + encoded := base64.StdEncoding.EncodeToString([]byte(dat)) + a_txt.SetContent(encoded) + a_txt.SetType("text/plain") + a_txt.SetFilename("testing.txt") + a_txt.SetDisposition("attachment") + a_txt.SetContentID("Test Document") + + // read/attach .pdf file + a_pdf := mail.NewAttachment() + dat, err = ioutil.ReadFile("testing.pdf") + if err != nil { + fmt.Println(err) + } + encoded = base64.StdEncoding.EncodeToString([]byte(dat)) + a_pdf.SetContent(encoded) + a_pdf.SetType("application/pdf") + a_pdf.SetFilename("testing.pdf") + a_pdf.SetDisposition("attachment") + a_pdf.SetContentID("Test Attachment") + + // read/attach .jpg file + a_jpg := mail.NewAttachment() + dat, err = ioutil.ReadFile("testing.jpg") + if err != nil { + fmt.Println(err) + } + encoded = base64.StdEncoding.EncodeToString([]byte(dat)) + a_jpg.SetContent(encoded) + a_jpg.SetType("image/jpeg") + a_jpg.SetFilename("testing.jpg") + a_jpg.SetDisposition("attachment") + a_jpg.SetContentID("Test Attachment") + + // add `a_txt`, `a_pdf` and `a_jpg` to `m` + m.AddAttachment(a_txt) + m.AddAttachment(a_pdf) + m.AddAttachment(a_jpg) + + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = mail.GetRequestBody(m) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + +## Without Mail Helper Class + +```go +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + request := sendgrid.GetRequest(os.Getenv("SENDGRID_API_KEY"), "/v3/mail/send", "https://api.sendgrid.com") + request.Method = "POST" + request.Body = []byte(` { + "personalizations": [ + { + "to": [ + { + "email": "test1@example.com" + } + ], + "subject": "Attachments - Demystified!" + } + } + ], + "from": { + "email": "test@example.com" + }, + "content": [ + { + "type": "text/html", + "value": "
Sending different attachments.
" + } + ], + "attachments": [ + { + "content": "SGVsbG8gV29ybGQh", + "content_id": "testing_1", + "disposition": "attachment", + "filename": "testing.txt", + "type": "txt" + }, + { + "content": "BASE64 encoded content block here", + "content_id": "testing_2", + "disposition": "attachment", + "filename": "testing.jpg", + "type": "jpg" + }, + { + "content": "BASE64 encoded content block here", + "content_id": "testing_3", + "disposition": "attachment", + "filename": "testing.pdf", + "type": "pdf" + } + ] + }`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + +### How to View Email Statistics +You can find documentation for how to view your email statistics via the UI [here](https://app.sendgrid.com/statistics). +To view Email Statistics via the API: +``` +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + apiKey := os.Getenv("SENDGRID_API_KEY") + host := "https://api.sendgrid.com" + request := sendgrid.GetRequest(apiKey, "/v3/stats", host) + request.Method = "GET" + queryParams := make(map[string]string) + queryParams["aggregated_by"] = "day" + queryParams["limit"] = "1" + queryParams["start_date"] = "2017-01-01" + queryParams["end_date"] = "2017-10-12" + queryParams["offset"] = "1" + request.QueryParams = queryParams + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` + + +### How to Setup a Domain Whitelabel +You can find documentation for how to setup a domain whitelabel via the UI [here](https://sendgrid.com/docs/Classroom/Basics/Whitelabel/setup_domain_whitelabel.html). +Find more information about all of SendGrid's whitelabeling related documentation [here](https://sendgrid.com/docs/Classroom/Basics/Whitelabel/index.html). + +To create a Domain Whitelabel Via the API: +``` +package main + +import ( + "fmt" + "log" + "os" + + "github.com/sendgrid/sendgrid-go" +) + +func main() { + apiKey := os.Getenv("SENDGRID_API_KEY") + host := "https://api.sendgrid.com" + request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains", host) + request.Method = "POST" + request.Body = []byte(` { + "automatic_security": false, + "custom_spf": true, + "default": true, + "domain": "example.com", + "ips": [ + "192.168.1.1", + "192.168.1.2" + ], + "subdomain": "SUBDOMAIN", + "username": "YOUR_SENDGRID_SUBUSER_NAME" +}`) + response, err := sendgrid.API(request) + if err != nil { + log.Println(err) + } else { + fmt.Println(response.StatusCode) + fmt.Println(response.Body) + fmt.Println(response.Headers) + } +} +``` diff --git a/vendor/github.com/sendgrid/sendgrid-go/go.test.sh b/vendor/github.com/sendgrid/sendgrid-go/go.test.sh new file mode 100755 index 0000000..34dbbfb --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... 
| grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic $d + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/README.md b/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/README.md new file mode 100644 index 0000000..07ce495 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/README.md @@ -0,0 +1,31 @@ +**This helper allows you to quickly and easily build a Mail object for sending email through SendGrid.** + +## Dependencies + +- [rest](https://github.com/sendgrid/rest) + +# Quick Start + +Run the [example](https://github.com/sendgrid/sendgrid-go/tree/master/examples/helpers/mail/example.go) (make sure you have set your environment variable to include your SENDGRID_API_KEY). + +```bash +go run examples/helpers/mail/example.go +``` + +## Usage + +- See the [example](https://github.com/sendgrid/sendgrid-go/tree/master/examples/helpers/mail/example.go) for a complete working example. +- [Documentation](https://sendgrid.com/docs/API_Reference/Web_API_v3/Mail/overview.html) + +## Test + +```bash +go test ./... -v +``` + +or + +```bash +cd helpers/mail +go test -v +``` diff --git a/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/mail_v3.go b/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/mail_v3.go new file mode 100644 index 0000000..c782645 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/helpers/mail/mail_v3.go @@ -0,0 +1,676 @@ +package mail + +import ( + "encoding/json" + "log" +) + +// SGMailV3 contains mail struct +type SGMailV3 struct { + From *Email `json:"from,omitempty"` + Subject string `json:"subject,omitempty"` + Personalizations []*Personalization `json:"personalizations,omitempty"` + Content []*Content `json:"content,omitempty"` + Attachments []*Attachment `json:"attachments,omitempty"` + TemplateID string `json:"template_id,omitempty"` + Sections map[string]string `json:"sections,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Categories []string `json:"categories,omitempty"` + CustomArgs map[string]string `json:"custom_args,omitempty"` + SendAt int `json:"send_at,omitempty"` + BatchID string `json:"batch_id,omitempty"` + Asm *Asm `json:"asm,omitempty"` + IPPoolID string `json:"ip_pool_name,omitempty"` + MailSettings *MailSettings `json:"mail_settings,omitempty"` + TrackingSettings *TrackingSettings `json:"tracking_settings,omitempty"` + ReplyTo *Email `json:"reply_to,omitempty"` +} + +// Personalization ... 
+type Personalization struct { + To []*Email `json:"to,omitempty"` + CC []*Email `json:"cc,omitempty"` + BCC []*Email `json:"bcc,omitempty"` + Subject string `json:"subject,omitempty"` + Headers map[string]string `json:"headers,omitempty"` + Substitutions map[string]string `json:"substitutions,omitempty"` + CustomArgs map[string]string `json:"custom_args,omitempty"` + DynamicTemplateData map[string]interface{} `json:"dynamic_template_data,omitempty"` + Categories []string `json:"categories,omitempty"` + SendAt int `json:"send_at,omitempty"` +} + +// Email holds email name and address info +type Email struct { + Name string `json:"name,omitempty"` + Address string `json:"email,omitempty"` +} + +// Content defines content of the mail body +type Content struct { + Type string `json:"type,omitempty"` + Value string `json:"value,omitempty"` +} + +// Attachment holds attachement information +type Attachment struct { + Content string `json:"content,omitempty"` + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Filename string `json:"filename,omitempty"` + Disposition string `json:"disposition,omitempty"` + ContentID string `json:"content_id,omitempty"` +} + +// Asm ... +type Asm struct { + GroupID int `json:"group_id,omitempty"` + GroupsToDisplay []int `json:"groups_to_display,omitempty"` +} + +// MailSettings ... +type MailSettings struct { + BCC *BccSetting `json:"bcc,omitempty"` + BypassListManagement *Setting `json:"bypass_list_management,omitempty"` + Footer *FooterSetting `json:"footer,omitempty"` + SandboxMode *Setting `json:"sandbox_mode,omitempty"` + SpamCheckSetting *SpamCheckSetting `json:"spam_check,omitempty"` +} + +// TrackingSettings ... +type TrackingSettings struct { + ClickTracking *ClickTrackingSetting `json:"click_tracking,omitempty"` + OpenTracking *OpenTrackingSetting `json:"open_tracking,omitempty"` + SubscriptionTracking *SubscriptionTrackingSetting `json:"subscription_tracking,omitempty"` + GoogleAnalytics *GaSetting `json:"ganalytics,omitempty"` + BCC *BccSetting `json:"bcc,omitempty"` + BypassListManagement *Setting `json:"bypass_list_management,omitempty"` + Footer *FooterSetting `json:"footer,omitempty"` + SandboxMode *SandboxModeSetting `json:"sandbox_mode,omitempty"` +} + +// BccSetting ... +type BccSetting struct { + Enable *bool `json:"enable,omitempty"` + Email string `json:"email,omitempty"` +} + +// FooterSetting ... +type FooterSetting struct { + Enable *bool `json:"enable,omitempty"` + Text string `json:"text,omitempty"` + Html string `json:"html,omitempty"` +} + +// ClickTrackingSetting ... +type ClickTrackingSetting struct { + Enable *bool `json:"enable,omitempty"` + EnableText *bool `json:"enable_text,omitempty"` +} + +// OpenTrackingSetting ... +type OpenTrackingSetting struct { + Enable *bool `json:"enable,omitempty"` + SubstitutionTag string `json:"substitution_tag,omitempty"` +} + +// SandboxModeSetting ... +type SandboxModeSetting struct { + Enable *bool `json:"enable,omitempty"` + ForwardSpam *bool `json:"forward_spam,omitempty"` + SpamCheck *SpamCheckSetting `json:"spam_check,omitempty"` +} + +// SpamCheckSetting ... +type SpamCheckSetting struct { + Enable *bool `json:"enable,omitempty"` + SpamThreshold int `json:"threshold,omitempty"` + PostToURL string `json:"post_to_url,omitempty"` +} + +// SubscriptionTrackingSetting ... 
+type SubscriptionTrackingSetting struct { + Enable *bool `json:"enable,omitempty"` + Text string `json:"text,omitempty"` + Html string `json:"html,omitempty"` + SubstitutionTag string `json:"substitution_tag,omitempty"` +} + +// GaSetting ... +type GaSetting struct { + Enable *bool `json:"enable,omitempty"` + CampaignSource string `json:"utm_source,omitempty"` + CampaignTerm string `json:"utm_term,omitempty"` + CampaignContent string `json:"utm_content,omitempty"` + CampaignName string `json:"utm_campaign,omitempty"` + CampaignMedium string `json:"utm_medium,omitempty"` +} + +// Setting ... +type Setting struct { + Enable *bool `json:"enable,omitempty"` +} + +// NewV3Mail ... +func NewV3Mail() *SGMailV3 { + return &SGMailV3{ + Personalizations: make([]*Personalization, 0), + Content: make([]*Content, 0), + Attachments: make([]*Attachment, 0), + } +} + +// NewV3MailInit ... +func NewV3MailInit(from *Email, subject string, to *Email, content ...*Content) *SGMailV3 { + m := new(SGMailV3) + m.SetFrom(from) + m.Subject = subject + p := NewPersonalization() + p.AddTos(to) + m.AddPersonalizations(p) + m.AddContent(content...) + return m +} + +// GetRequestBody ... +func GetRequestBody(m *SGMailV3) []byte { + b, err := json.Marshal(m) + if err != nil { + log.Println(err) + } + return b +} + +// AddPersonalizations ... +func (s *SGMailV3) AddPersonalizations(p ...*Personalization) *SGMailV3 { + s.Personalizations = append(s.Personalizations, p...) + return s +} + +// AddContent ... +func (s *SGMailV3) AddContent(c ...*Content) *SGMailV3 { + s.Content = append(s.Content, c...) + return s +} + +// AddAttachment ... +func (s *SGMailV3) AddAttachment(a ...*Attachment) *SGMailV3 { + s.Attachments = append(s.Attachments, a...) + return s +} + +// SetFrom ... +func (s *SGMailV3) SetFrom(e *Email) *SGMailV3 { + s.From = e + return s +} + +// SetReplyTo ... +func (s *SGMailV3) SetReplyTo(e *Email) *SGMailV3 { + s.ReplyTo = e + return s +} + +// SetTemplateID ... +func (s *SGMailV3) SetTemplateID(templateID string) *SGMailV3 { + s.TemplateID = templateID + return s +} + +// AddSection ... +func (s *SGMailV3) AddSection(key string, value string) *SGMailV3 { + if s.Sections == nil { + s.Sections = make(map[string]string) + } + + s.Sections[key] = value + return s +} + +// SetHeader ... +func (s *SGMailV3) SetHeader(key string, value string) *SGMailV3 { + if s.Headers == nil { + s.Headers = make(map[string]string) + } + + s.Headers[key] = value + return s +} + +// AddCategories ... +func (s *SGMailV3) AddCategories(category ...string) *SGMailV3 { + s.Categories = append(s.Categories, category...) + return s +} + +// SetCustomArg ... +func (s *SGMailV3) SetCustomArg(key string, value string) *SGMailV3 { + if s.CustomArgs == nil { + s.CustomArgs = make(map[string]string) + } + + s.CustomArgs[key] = value + return s +} + +// SetSendAt ... +func (s *SGMailV3) SetSendAt(sendAt int) *SGMailV3 { + s.SendAt = sendAt + return s +} + +// SetBatchID ... +func (s *SGMailV3) SetBatchID(batchID string) *SGMailV3 { + s.BatchID = batchID + return s +} + +// SetASM ... +func (s *SGMailV3) SetASM(asm *Asm) *SGMailV3 { + s.Asm = asm + return s +} + +// SetIPPoolID ... +func (s *SGMailV3) SetIPPoolID(ipPoolID string) *SGMailV3 { + s.IPPoolID = ipPoolID + return s +} + +// SetMailSettings ... +func (s *SGMailV3) SetMailSettings(mailSettings *MailSettings) *SGMailV3 { + s.MailSettings = mailSettings + return s +} + +// SetTrackingSettings ... 
+func (s *SGMailV3) SetTrackingSettings(trackingSettings *TrackingSettings) *SGMailV3 { + s.TrackingSettings = trackingSettings + return s +} + +// NewPersonalization ... +func NewPersonalization() *Personalization { + return &Personalization{ + To: make([]*Email, 0), + CC: make([]*Email, 0), + BCC: make([]*Email, 0), + Headers: make(map[string]string), + Substitutions: make(map[string]string), + CustomArgs: make(map[string]string), + DynamicTemplateData: make(map[string]interface{}), + Categories: make([]string, 0), + } +} + +// AddTos ... +func (p *Personalization) AddTos(to ...*Email) { + p.To = append(p.To, to...) +} + +// AddCCs ... +func (p *Personalization) AddCCs(cc ...*Email) { + p.CC = append(p.CC, cc...) +} + +// AddBCCs ... +func (p *Personalization) AddBCCs(bcc ...*Email) { + p.BCC = append(p.BCC, bcc...) +} + +// SetHeader ... +func (p *Personalization) SetHeader(key string, value string) { + p.Headers[key] = value +} + +// SetSubstitution ... +func (p *Personalization) SetSubstitution(key string, value string) { + p.Substitutions[key] = value +} + +// SetCustomArg ... +func (p *Personalization) SetCustomArg(key string, value string) { + p.CustomArgs[key] = value +} + +// SetDynamicTemplateData ... +func (p *Personalization) SetDynamicTemplateData(key string, value interface{}) { + p.DynamicTemplateData[key] = value +} + +// SetSendAt ... +func (p *Personalization) SetSendAt(sendAt int) { + p.SendAt = sendAt +} + +// NewAttachment ... +func NewAttachment() *Attachment { + return &Attachment{} +} + +// SetContent ... +func (a *Attachment) SetContent(content string) *Attachment { + a.Content = content + return a +} + +// SetType ... +func (a *Attachment) SetType(contentType string) *Attachment { + a.Type = contentType + return a +} + +// SetFilename ... +func (a *Attachment) SetFilename(filename string) *Attachment { + a.Filename = filename + return a +} + +// SetDisposition ... +func (a *Attachment) SetDisposition(disposition string) *Attachment { + a.Disposition = disposition + return a +} + +// SetContentID ... +func (a *Attachment) SetContentID(contentID string) *Attachment { + a.ContentID = contentID + return a +} + +// NewASM ... +func NewASM() *Asm { + return &Asm{} +} + +// SetGroupID ... +func (a *Asm) SetGroupID(groupID int) *Asm { + a.GroupID = groupID + return a +} + +// AddGroupsToDisplay ... +func (a *Asm) AddGroupsToDisplay(groupsToDisplay ...int) *Asm { + a.GroupsToDisplay = append(a.GroupsToDisplay, groupsToDisplay...) + return a +} + +// NewMailSettings ... +func NewMailSettings() *MailSettings { + return &MailSettings{} +} + +// SetBCC ... +func (m *MailSettings) SetBCC(bcc *BccSetting) *MailSettings { + m.BCC = bcc + return m +} + +// SetBypassListManagement ... +func (m *MailSettings) SetBypassListManagement(bypassListManagement *Setting) *MailSettings { + m.BypassListManagement = bypassListManagement + return m +} + +// SetFooter ... +func (m *MailSettings) SetFooter(footerSetting *FooterSetting) *MailSettings { + m.Footer = footerSetting + return m +} + +// SetSandboxMode ... +func (m *MailSettings) SetSandboxMode(sandboxMode *Setting) *MailSettings { + m.SandboxMode = sandboxMode + return m +} + +// SetSpamCheckSettings ... +func (m *MailSettings) SetSpamCheckSettings(spamCheckSetting *SpamCheckSetting) *MailSettings { + m.SpamCheckSetting = spamCheckSetting + return m +} + +// NewTrackingSettings ... +func NewTrackingSettings() *TrackingSettings { + return &TrackingSettings{} +} + +// SetClickTracking ... 
+func (t *TrackingSettings) SetClickTracking(clickTracking *ClickTrackingSetting) *TrackingSettings { + t.ClickTracking = clickTracking + return t + +} + +// SetOpenTracking ... +func (t *TrackingSettings) SetOpenTracking(openTracking *OpenTrackingSetting) *TrackingSettings { + t.OpenTracking = openTracking + return t +} + +// SetSubscriptionTracking ... +func (t *TrackingSettings) SetSubscriptionTracking(subscriptionTracking *SubscriptionTrackingSetting) *TrackingSettings { + t.SubscriptionTracking = subscriptionTracking + return t +} + +// SetGoogleAnalytics ... +func (t *TrackingSettings) SetGoogleAnalytics(googleAnalytics *GaSetting) *TrackingSettings { + t.GoogleAnalytics = googleAnalytics + return t +} + +// NewBCCSetting ... +func NewBCCSetting() *BccSetting { + return &BccSetting{} +} + +// SetEnable ... +func (b *BccSetting) SetEnable(enable bool) *BccSetting { + setEnable := enable + b.Enable = &setEnable + return b +} + +// SetEmail ... +func (b *BccSetting) SetEmail(email string) *BccSetting { + b.Email = email + return b +} + +// NewFooterSetting ... +func NewFooterSetting() *FooterSetting { + return &FooterSetting{} +} + +// SetEnable ... +func (f *FooterSetting) SetEnable(enable bool) *FooterSetting { + setEnable := enable + f.Enable = &setEnable + return f +} + +// SetText ... +func (f *FooterSetting) SetText(text string) *FooterSetting { + f.Text = text + return f +} + +// SetHTML ... +func (f *FooterSetting) SetHTML(html string) *FooterSetting { + f.Html = html + return f +} + +// NewOpenTrackingSetting ... +func NewOpenTrackingSetting() *OpenTrackingSetting { + return &OpenTrackingSetting{} +} + +// SetEnable ... +func (o *OpenTrackingSetting) SetEnable(enable bool) *OpenTrackingSetting { + setEnable := enable + o.Enable = &setEnable + return o +} + +// SetSubstitutionTag ... +func (o *OpenTrackingSetting) SetSubstitutionTag(subTag string) *OpenTrackingSetting { + o.SubstitutionTag = subTag + return o +} + +// NewSubscriptionTrackingSetting ... +func NewSubscriptionTrackingSetting() *SubscriptionTrackingSetting { + return &SubscriptionTrackingSetting{} +} + +// SetEnable ... +func (s *SubscriptionTrackingSetting) SetEnable(enable bool) *SubscriptionTrackingSetting { + setEnable := enable + s.Enable = &setEnable + return s +} + +// SetText ... +func (s *SubscriptionTrackingSetting) SetText(text string) *SubscriptionTrackingSetting { + s.Text = text + return s +} + +// SetHTML ... +func (s *SubscriptionTrackingSetting) SetHTML(html string) *SubscriptionTrackingSetting { + s.Html = html + return s +} + +// SetSubstitutionTag ... +func (s *SubscriptionTrackingSetting) SetSubstitutionTag(subTag string) *SubscriptionTrackingSetting { + s.SubstitutionTag = subTag + return s +} + +// NewGaSetting ... +func NewGaSetting() *GaSetting { + return &GaSetting{} +} + +// SetEnable ... +func (g *GaSetting) SetEnable(enable bool) *GaSetting { + setEnable := enable + g.Enable = &setEnable + return g +} + +// SetCampaignSource ... +func (g *GaSetting) SetCampaignSource(campaignSource string) *GaSetting { + g.CampaignSource = campaignSource + return g +} + +// SetCampaignContent ... +func (g *GaSetting) SetCampaignContent(campaignContent string) *GaSetting { + g.CampaignContent = campaignContent + return g +} + +// SetCampaignTerm ... +func (g *GaSetting) SetCampaignTerm(campaignTerm string) *GaSetting { + g.CampaignTerm = campaignTerm + return g +} + +// SetCampaignName ... 
+func (g *GaSetting) SetCampaignName(campaignName string) *GaSetting { + g.CampaignName = campaignName + return g +} + +// SetCampaignMedium ... +func (g *GaSetting) SetCampaignMedium(campaignMedium string) *GaSetting { + g.CampaignMedium = campaignMedium + return g +} + +// NewSetting ... +func NewSetting(enable bool) *Setting { + setEnable := enable + return &Setting{Enable: &setEnable} +} + +// NewEmail ... +func NewEmail(name string, address string) *Email { + return &Email{ + Name: name, + Address: address, + } +} + +// NewSingleEmail ... +func NewSingleEmail(from *Email, subject string, to *Email, plainTextContent string, htmlContent string) *SGMailV3 { + plainText := NewContent("text/plain", plainTextContent) + html := NewContent("text/html", htmlContent) + return NewV3MailInit(from, subject, to, plainText, html) +} + +// NewContent ... +func NewContent(contentType string, value string) *Content { + return &Content{ + Type: contentType, + Value: value, + } +} + +// NewClickTrackingSetting ... +func NewClickTrackingSetting() *ClickTrackingSetting { + return &ClickTrackingSetting{} +} + +// SetEnable ... +func (c *ClickTrackingSetting) SetEnable(enable bool) *ClickTrackingSetting { + setEnable := enable + c.Enable = &setEnable + return c +} + +// SetEnableText ... +func (c *ClickTrackingSetting) SetEnableText(enableText bool) *ClickTrackingSetting { + setEnable := enableText + c.EnableText = &setEnable + return c +} + +// NewSpamCheckSetting ... +func NewSpamCheckSetting() *SpamCheckSetting { + return &SpamCheckSetting{} +} + +// SetEnable ... +func (s *SpamCheckSetting) SetEnable(enable bool) *SpamCheckSetting { + setEnable := enable + s.Enable = &setEnable + return s +} + +// SetSpamThreshold ... +func (s *SpamCheckSetting) SetSpamThreshold(spamThreshold int) *SpamCheckSetting { + s.SpamThreshold = spamThreshold + return s +} + +// SetPostToURL ... +func (s *SpamCheckSetting) SetPostToURL(postToURL string) *SpamCheckSetting { + s.PostToURL = postToURL + return s +} + +// NewSandboxModeSetting ... +func NewSandboxModeSetting(enable bool, forwardSpam bool, spamCheck *SpamCheckSetting) *SandboxModeSetting { + setEnable := enable + setForwardSpam := forwardSpam + return &SandboxModeSetting{ + Enable: &setEnable, + ForwardSpam: &setForwardSpam, + SpamCheck: spamCheck, + } +} diff --git a/vendor/github.com/sendgrid/sendgrid-go/prism.sh b/vendor/github.com/sendgrid/sendgrid-go/prism.sh new file mode 100755 index 0000000..1c638c7 --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/prism.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +install () { + +set -eu + +UNAME=$(uname) +ARCH=$(uname -m) +if [ "$UNAME" != "Linux" ] && [ "$UNAME" != "Darwin" ] && [ "$ARCH" != "x86_64" ] && [ "$ARCH" != "i686" ]; then + echo "Sorry, OS/Architecture not supported: ${UNAME}/${ARCH}. 
Download binary from https://github.com/stoplightio/prism/releases" + exit 1 +fi + +if [ "$UNAME" = "Darwin" ] ; then + OSX_ARCH=$(uname -m) + if [ "${OSX_ARCH}" = "x86_64" ] ; then + PLATFORM="darwin_amd64" + fi +elif [ "$UNAME" = "Linux" ] ; then + LINUX_ARCH=$(uname -m) + if [ "${LINUX_ARCH}" = "i686" ] ; then + PLATFORM="linux_386" + elif [ "${LINUX_ARCH}" = "x86_64" ] ; then + PLATFORM="linux_amd64" + fi +fi + +#LATEST=$(curl -s https://api.github.com/repos/stoplightio/prism/tags | grep -Eo '"name":.*?[^\\]",' | head -n 1 | sed 's/[," ]//g' | cut -d ':' -f 2) +LATEST="v0.1.5" +URL="https://github.com/stoplightio/prism/releases/download/$LATEST/prism_$PLATFORM" +DEST=/home/travis/gopath/bin/prism + +if [ -z $LATEST ] ; then + echo "Error requesting. Download binary from ${URL}" + exit 1 +else + curl -L $URL -o $DEST + chmod +x $DEST +fi +} + +install \ No newline at end of file diff --git a/vendor/github.com/sendgrid/sendgrid-go/sendgrid.go b/vendor/github.com/sendgrid/sendgrid-go/sendgrid.go new file mode 100644 index 0000000..5e2a0ed --- /dev/null +++ b/vendor/github.com/sendgrid/sendgrid-go/sendgrid.go @@ -0,0 +1,127 @@ +// Package sendgrid provides a simple interface to interact with the SendGrid API +package sendgrid + +import ( + "errors" + "net/http" + "strconv" + "time" + + "github.com/sendgrid/rest" // depends on version 2.2.0 + "github.com/sendgrid/sendgrid-go/helpers/mail" +) + +// Version is this client library's current version +const ( + Version = "3.1.0" + rateLimitRetry = 5 + rateLimitSleep = 1100 +) + +// Client is the SendGrid Go client +type Client struct { + // rest.Request + rest.Request +} + +// GetRequest returns a default request object. +func GetRequest(key string, endpoint string, host string) rest.Request { + if host == "" { + host = "https://api.sendgrid.com" + } + baseURL := host + endpoint + requestHeaders := map[string]string{ + "Authorization": "Bearer " + key, + "User-Agent": "sendgrid/" + Version + ";go", + "Accept": "application/json", + } + request := rest.Request{ + BaseURL: baseURL, + Headers: requestHeaders, + } + return request +} + +// Send sends an email through SendGrid +func (cl *Client) Send(email *mail.SGMailV3) (*rest.Response, error) { + cl.Body = mail.GetRequestBody(email) + return API(cl.Request) +} + +// NewSendClient constructs a new SendGrid client given an API key +func NewSendClient(key string) *Client { + request := GetRequest(key, "/v3/mail/send", "") + request.Method = "POST" + return &Client{request} +} + +// DefaultClient is used if no custom HTTP client is defined +var DefaultClient = rest.DefaultClient + +// API sets up the request to the SendGrid API, this is main interface. +// This function is deprecated. Please use the MakeRequest or +// MakeRequestAsync functions. +func API(request rest.Request) (*rest.Response, error) { + return DefaultClient.API(request) +} + +// MakeRequest attemps a SendGrid request synchronously. +func MakeRequest(request rest.Request) (*rest.Response, error) { + return DefaultClient.API(request) +} + +// MakeRequestRetry a synchronous request, but retry in the event of a rate +// limited response. 
+func MakeRequestRetry(request rest.Request) (*rest.Response, error) { + retry := 0 + var response *rest.Response + var err error + + for { + response, err = DefaultClient.API(request) + if err != nil { + return nil, err + } + + if response.StatusCode != http.StatusTooManyRequests { + return response, nil + } + + if retry > rateLimitRetry { + return nil, errors.New("Rate limit retry exceeded") + } + retry++ + + resetTime := time.Now().Add(rateLimitSleep * time.Millisecond) + + reset, ok := response.Headers["X-RateLimit-Reset"] + if ok && len(reset) > 0 { + t, err := strconv.Atoi(reset[0]) + if err == nil { + resetTime = time.Unix(int64(t), 0) + } + } + time.Sleep(resetTime.Sub(time.Now())) + } +} + +// MakeRequestAsync attempts a request asynchronously in a new go +// routine. This function returns two channels: responses +// and errors. This function will retry in the case of a +// rate limit. +func MakeRequestAsync(request rest.Request) (chan *rest.Response, chan error) { + r := make(chan *rest.Response) + e := make(chan error) + + go func() { + response, err := MakeRequestRetry(request) + if err != nil { + e <- err + } + if response != nil { + r <- response + } + }() + + return r, e +} diff --git a/vendor/github.com/stretchr/objx/LICENSE b/vendor/github.com/stretchr/objx/LICENSE new file mode 100644 index 0000000..44d4d9d --- /dev/null +++ b/vendor/github.com/stretchr/objx/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Stretchr, Inc. +Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go new file mode 100644 index 0000000..d95be0c --- /dev/null +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -0,0 +1,171 @@ +package objx + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// arrayAccesRegexString is the regex used to extract the array number +// from the access path +const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. +// +// Get can only operate directly on map[string]interface{} and []interface. 
+// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true, false) + return m +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current, selector, value interface{}, isSet, panics bool) interface{} { + + switch selector.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + + if array, ok := current.([]interface{}); ok { + index := intFromInterface(selector) + + if index >= len(array) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + return nil + } + + return array[index] + } + + return nil + + case string: + + selStr := selector.(string) + selSegs := strings.SplitN(selStr, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + var err error + + if strings.Contains(thisSel, "[") { + arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) + + if len(arrayMatches) > 0 { + // Get the key into the map + thisSel = arrayMatches[1] + + // Get the index into the array at the key + index, err = strconv.Atoi(arrayMatches[2]) + + if err != nil { + // This should never happen. If it does, something has gone + // seriously wrong. Panic. + panic("objx: Array index is not an integer. Must use array[int].") + } + } + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } + current = curMSI[thisSel] + default: + current = nil + } + + if current == nil && panics { + panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) + } + + // do we need to access the item of an array? + if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. 
Slice only contains %d items.", index, len(array))) + } + current = nil + } + } + } + + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet, panics) + } + + } + return current +} + +// intFromInterface converts an interface object to the largest +// representation of an unsigned integer using a type switch and +// assertions +func intFromInterface(selector interface{}) int { + var value int + switch selector.(type) { + case int: + value = selector.(int) + case int8: + value = int(selector.(int8)) + case int16: + value = int(selector.(int16)) + case int32: + value = int(selector.(int32)) + case int64: + value = int(selector.(int64)) + case uint: + value = int(selector.(uint)) + case uint8: + value = int(selector.(uint8)) + case uint16: + value = int(selector.(uint16)) + case uint32: + value = int(selector.(uint32)) + case uint64: + value = int(selector.(uint64)) + default: + panic("objx: array access argument is not an integer type (this should never happen)") + } + return value +} diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go new file mode 100644 index 0000000..f9eb42a --- /dev/null +++ b/vendor/github.com/stretchr/objx/constants.go @@ -0,0 +1,13 @@ +package objx + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // SignatureSeparator is the character that is used to + // separate the Base64 string from the security signature. + SignatureSeparator = "_" +) diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go new file mode 100644 index 0000000..5e020f3 --- /dev/null +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -0,0 +1,108 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + result, err := json.Marshal(m) + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + return string(result), err +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + _, err = encoder.Write([]byte(jsonData)) + if err != nil { + return "", err + } + _ = encoder.Close() + + return buf.String(), nil +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. 
+func (m Map) SignedBase64(key string) (string, error) { + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + return base64 + SignatureSeparator + sig, nil +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + vals := make(url.Values) + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go new file mode 100644 index 0000000..6d6af1a --- /dev/null +++ b/vendor/github.com/stretchr/objx/doc.go @@ -0,0 +1,66 @@ +/* +Objx - Go package for dealing with maps, slices, JSON and other data. + +Overview + +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +a powerful `Get` method (among others) that allows you to easily and quickly get +access to data within the map, without having to worry too much about type assertions, +missing data, default values etc. + +Pattern + +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +the rest will be optimistic and try to figure things out without panicking. + +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, +or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, +manipulating and selecting that data. You can find out more by exploring the index below. + +Reading data + +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +Ranging + +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. +For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... 
+ } +*/ +package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go new file mode 100644 index 0000000..7e9389a --- /dev/null +++ b/vendor/github.com/stretchr/objx/map.go @@ -0,0 +1,193 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. +// If the data argument is not a map[string]interface, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. +func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// Panics +// +// Panics if any key argument is non-string or if there are an odd number of arguments. +// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates an Map equivalent to +// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) +func MSI(keyAndValuePairs ...interface{}) Map { + newMap := make(map[string]interface{}) + keyAndValuePairsLen := len(keyAndValuePairs) + if keyAndValuePairsLen%2 != 0 { + panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") + } + + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") + } + newMap[keyString] = value + } + return New(newMap) +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. +func FromJSON(jsonString string) (Map, error) { + var data interface{} + err := json.Unmarshal([]byte(jsonString), &data) + if err != nil { + return Nil, err + } + return New(data), nil +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. 
+// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + result, err := FromBase64(base64String) + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match") + } + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + result, err := FromSignedBase64(base64String, key) + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + vals, err := url.ParseQuery(query) + if err != nil { + return nil, err + } + + m := make(map[string]interface{}) + for k, vals := range vals { + m[k] = vals[0] + } + return New(m), nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + o, err := FromURLQuery(query) + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + return o +} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go new file mode 100644 index 0000000..e7b8eb7 --- /dev/null +++ b/vendor/github.com/stretchr/objx/mutations.go @@ -0,0 +1,74 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (m Map) Exclude(exclude []string) Map { + excluded := make(Map) + for k, v := range m { + var shouldInclude = true + for _, toExclude := range exclude { + if k == toExclude { + shouldInclude = false + break + } + } + if shouldInclude { + excluded[k] = v + } + } + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := make(map[string]interface{}) + for k, v := range m { + copied[k] = v + } + return New(copied) +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. 
+// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// MergeHere blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + for k, v := range merge { + m[k] = v + } + return m +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := make(map[string]interface{}) + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return New(newMap) +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + if newKey, ok := mapping[key]; ok { + return newKey, value + } + return key, value + }) +} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go new file mode 100644 index 0000000..e052ff8 --- /dev/null +++ b/vendor/github.com/stretchr/objx/security.go @@ -0,0 +1,17 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security +// key. +func HashWithKey(data, key string) string { + hash := sha1.New() + _, err := hash.Write([]byte(data + ":" + key)) + if err != nil { + return "" + } + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go new file mode 100644 index 0000000..d9e0b47 --- /dev/null +++ b/vendor/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. +func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 0000000..202a91f --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2501 @@ +package objx + +/* + Inter (interface{} and []interface{}) +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. 
+func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + var selected []interface{} + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + groups := make(map[string][]interface{}) + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + var selected []map[string]interface{} + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + groups := make(map[string][]map[string]interface{}) + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. 
+func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([](Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + var selected [](Map) + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). 
+func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + groups := make(map[string][](Map)) + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Bool (bool and []bool) +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. +func (v *Value) EachBool(callback func(int, bool) bool) *Value { + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + var selected []bool + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. 
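An illustrative sketch (not from the vendored file) of the typed accessor pattern this generated file repeats for every type, shown here with the Bool helpers above: the plain accessor falls back to an optional default, while the Must variant panics on a type mismatch:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	v := objx.New(map[string]interface{}{"debitBalance": true})

	// Bool returns the value, or the optional default when the selector is
	// missing or holds a different type; MustBool would panic instead.
	fmt.Println(v.Get("debitBalance").Bool())   // true
	fmt.Println(v.Get("missing").Bool(false))   // false (the default)
	fmt.Println(v.Get("debitBalance").IsBool()) // true
}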
+func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + groups := make(map[string][]bool) + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Str (string and []string) +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. +func (v *Value) EachStr(callback func(int, string) bool) *Value { + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + var selected []string + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. 
+func (v *Value) GroupStr(grouper func(int, string) string) *Value { + groups := make(map[string][]string) + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int (int and []int) +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt uses the specified decider function to select items +// from the []int. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + var selected []int + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. 
+func (v *Value) GroupInt(grouper func(int, int) string) *Value { + groups := make(map[string][]int) + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + var selected []int8 + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int8. 
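A brief sketch (illustrative only) of the Each/Replace/Collect pattern shared by these generated helpers, using the int variants defined above:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	scores := objx.New(map[string]interface{}{"scores": []int{1, 2, 3}}).Get("scores")

	// ReplaceInt builds a []int by applying the replacer to each element;
	// CollectInt gathers arbitrary values into a []interface{}.
	doubled := scores.ReplaceInt(func(_ int, n int) int { return n * 2 })
	labels := scores.CollectInt(func(i int, n int) interface{} {
		return fmt.Sprintf("#%d=%d", i, n)
	})

	fmt.Println(doubled.MustIntSlice())  // [2 4 6]
	fmt.Println(labels.MustInterSlice()) // [#0=1 #1=2 #2=3]
}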
+func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + groups := make(map[string][]int8) + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + var selected []int16 + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. 
+func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + groups := make(map[string][]int16) + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. +func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + var selected []int32 + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. 
+func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + groups := make(map[string][]int32) + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + var selected []int64 + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. 
+func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + groups := make(map[string][]int64) + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint (uint and []uint) +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint uses the specified decider function to select items +// from the []uint. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + var selected []uint + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. 
+func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + groups := make(map[string][]uint) + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + var selected []uint8 + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. 
+func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + groups := make(map[string][]uint8) + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. +func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. +func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + var selected []uint16 + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + groups := make(map[string][]uint16) + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. +func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + var selected []uint32 + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. +func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + groups := make(map[string][]uint32) + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. +func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. 
The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + var selected []uint64 + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + groups := make(map[string][]uint64) + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. +func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + var selected []uintptr + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + groups := make(map[string][]uintptr) + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. 
+func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + var selected []float32 + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. +func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + groups := make(map[string][]float32) + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. 
+func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + var selected []float64 + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + groups := make(map[string][]float64) + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. +func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. 
+// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + var selected []complex64 + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. +func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + groups := make(map[string][]complex64) + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. 
+func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + var selected []complex128 + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + groups := make(map[string][]complex128) + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go new file mode 100644 index 0000000..956a221 --- /dev/null +++ b/vendor/github.com/stretchr/objx/value.go @@ -0,0 +1,56 @@ +package objx + +import ( + "fmt" + "strconv" +) + +// Value provides methods for extracting interface{} data in various +// types. +type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} + +// String returns the value always as a string +func (v *Value) String() string { + switch { + case v.IsStr(): + return v.Str() + case v.IsBool(): + return strconv.FormatBool(v.Bool()) + case v.IsFloat32(): + return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) + case v.IsFloat64(): + return strconv.FormatFloat(v.Float64(), 'f', -1, 64) + case v.IsInt(): + return strconv.FormatInt(int64(v.Int()), 10) + case v.IsInt(): + return strconv.FormatInt(int64(v.Int()), 10) + case v.IsInt8(): + return strconv.FormatInt(int64(v.Int8()), 10) + case v.IsInt16(): + return strconv.FormatInt(int64(v.Int16()), 10) + case v.IsInt32(): + return strconv.FormatInt(int64(v.Int32()), 10) + case v.IsInt64(): + return strconv.FormatInt(v.Int64(), 10) + case v.IsUint(): + return strconv.FormatUint(uint64(v.Uint()), 10) + case v.IsUint8(): + return strconv.FormatUint(uint64(v.Uint8()), 10) + case v.IsUint16(): + return strconv.FormatUint(uint64(v.Uint16()), 10) + case v.IsUint32(): + return strconv.FormatUint(uint64(v.Uint32()), 10) + case v.IsUint64(): + return strconv.FormatUint(v.Uint64(), 10) + } + + return fmt.Sprintf("%#v", v.Data()) +} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE new file mode 100644 index 0000000..473b670 --- /dev/null +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
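The objx helpers above are generated once per builtin type and always follow the same pattern: a typed getter with an optional default, a Must variant that panics on a type mismatch, Is guards, and Each/Where/Group/Replace/Collect iterators built on the Must slice accessor, with value.go's String() handling scalar formatting. A minimal usage sketch, under the assumption that objx.Map and its Get accessor (defined elsewhere in this vendored package, not in these hunks) behave as in upstream objx:

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// Wrap plain data in an objx.Map and pull a field out as a *Value.
	m := objx.Map{
		"prices": []float64{9.99, 120.50, 3.25},
		"count":  3,
	}
	prices := m.Get("prices")

	// Type guard plus Must accessor, as defined in the generated code above.
	if prices.IsFloat64Slice() {
		fmt.Println(prices.MustFloat64Slice()) // [9.99 120.5 3.25]
	}

	// GroupFloat64 buckets elements under the key returned by the grouper.
	byBand := prices.GroupFloat64(func(i int, p float64) string {
		if p >= 100 {
			return "large"
		}
		return "small"
	})
	fmt.Println(byBand.Data()) // map[large:[120.5] small:[9.99 3.25]]

	// CollectFloat64 maps every element to an arbitrary value, yielding []interface{}.
	labels := prices.CollectFloat64(func(i int, p float64) interface{} {
		return fmt.Sprintf("item %d: %.2f", i, p)
	})
	fmt.Println(labels.Data())

	// Note: as generated above, WhereFloat64 keeps the elements for which
	// the decider returns false.
	cheap := prices.WhereFloat64(func(i int, p float64) bool { return p >= 100 })
	fmt.Println(cheap.Data()) // [9.99 3.25]

	// value.go's String() formats scalar values directly.
	fmt.Println(m.Get("count").String()) // "3"
}
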
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go new file mode 100644 index 0000000..aa1c2b9 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -0,0 +1,484 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Condition(t, comp, append([]interface{}{msg}, args...)...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Contains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return DirExists(t, path, append([]interface{}{msg}, args...)...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Empty(t, object, append([]interface{}{msg}, args...)...) +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. 
+// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Error(t, err, append([]interface{}{msg}, args...)...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return False(t, value, append([]interface{}{msg}, args...)...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return FileExists(t, path, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. 
+// +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) 
+} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Len(t, object, length, append([]interface{}{msg}, args...)...) +} + +// Nilf asserts that the specified object is nil. +// +// assert.Nilf(t, err, "error message %s", "formatted") +func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Nil(t, object, append([]interface{}{msg}, args...)...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NoError(t, err, append([]interface{}{msg}, args...)...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. 
+// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotEmpty(t, object, append([]interface{}{msg}, args...)...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotNil(t, object, append([]interface{}{msg}, args...)...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotPanics(t, f, append([]interface{}{msg}, args...)...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotZero(t, i, append([]interface{}{msg}, args...)...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. 
+// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Panics(t, f, append([]interface{}{msg}, args...)...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Subset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// Truef asserts that the specified value is true. +// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return True(t, value, append([]interface{}{msg}, args...)...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Zero(t, i, append([]interface{}{msg}, args...)...) 
+} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl new file mode 100644 index 0000000..d2bb0b8 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl @@ -0,0 +1,5 @@ +{{.CommentFormat}} +func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { + if h, ok := t.(tHelper); ok { h.Helper() } + return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 0000000..de39f79 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,956 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Condition(a.t, comp, msgAndArgs...) +} + +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Conditionf(a.t, comp, msg, args...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Contains(a.t, s, contains, msgAndArgs...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Containsf(a.t, s, contains, msg, args...) +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. 
+// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ElementsMatchf(a.t, listA, listB, msg, args...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Empty(a.t, object, msgAndArgs...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Emptyf(a.t, object, msg, args...) +} + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualError(a.t, theError, errString, msgAndArgs...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualErrorf(a.t, theError, errString, msg, args...) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123)) +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualValues(a.t, expected, actual, msgAndArgs...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. 
+// +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualValuesf(a.t, expected, actual, msg, args...) +} + +// Equalf asserts that two objects are equal. +// +// a.Equalf(123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Equalf(a.t, expected, actual, msg, args...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Error(a.t, err, msgAndArgs...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Errorf(a.t, err, msg, args...) +} + +// Exactly asserts that two objects are equal in value and type. +// +// a.Exactly(int32(123), int64(123)) +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Exactly(a.t, expected, actual, msgAndArgs...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Exactlyf(a.t, expected, actual, msg, args...) +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Fail(a.t, failureMessage, msgAndArgs...) +} + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FailNow(a.t, failureMessage, msgAndArgs...) +} + +// FailNowf fails test +func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FailNowf(a.t, failureMessage, msg, args...) +} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Failf(a.t, failureMessage, msg, args...) +} + +// False asserts that the specified value is false. +// +// a.False(myBool) +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return False(a.t, value, msgAndArgs...) +} + +// Falsef asserts that the specified value is false. 
+// +// a.Falsef(myBool, "error message %s", "formatted") +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FileExistsf(a.t, path, msg, args...) +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPError(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPErrorf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// Implementsf asserts that an object is implemented by the specified interface. 
+// +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Implementsf(a.t, interfaceObject, object, msg, args...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaf(a.t, expected, actual, delta, msg, args...) +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. 
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsType(a.t, expectedType, object, msgAndArgs...) +} + +// IsTypef asserts that the specified objects are of the same type. +func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsTypef(a.t, expectedType, object, msg, args...) +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return JSONEqf(a.t, expected, actual, msg, args...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3) +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Len(a.t, object, length, msgAndArgs...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Lenf(a.t, object, length, msg, args...) +} + +// Nil asserts that the specified object is nil. +// +// a.Nil(err) +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Nil(a.t, object, msgAndArgs...) +} + +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Nilf(a.t, object, msg, args...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). 
+// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoError(a.t, err, msgAndArgs...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoErrorf(a.t, err, msg, args...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotContainsf(a.t, s, contains, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEmpty(a.t, object, msgAndArgs...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEmptyf(a.t, object, msg, args...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEqualf(a.t, expected, actual, msg, args...) 
+} + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err) +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotNil(a.t, object, msgAndArgs...) +} + +// NotNilf asserts that the specified object is not nil. +// +// a.NotNilf(err, "error message %s", "formatted") +func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotNilf(a.t, object, msg, args...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ RemainCalm() }) +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotPanics(a.t, f, msgAndArgs...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotPanicsf(a.t, f, msg, args...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotRegexp(a.t, rx, str, msgAndArgs...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotRegexpf(a.t, rx, str, msg, args...) +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotSubsetf(a.t, list, subset, msg, args...) +} + +// NotZero asserts that i is not the zero value for its type. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotZero(a.t, i, msgAndArgs...) +} + +// NotZerof asserts that i is not the zero value for its type. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotZerof(a.t, i, msg, args...) 
+} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ GoCrazy() }) +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Panics(a.t, f, msgAndArgs...) +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return PanicsWithValue(a.t, expected, f, msgAndArgs...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Panicsf(a.t, f, msg, args...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Regexp(a.t, rx, str, msgAndArgs...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Regexpf(a.t, rx, str, msg, args...) +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Subset(a.t, list, subset, msgAndArgs...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Subsetf(a.t, list, subset, msg, args...) +} + +// True asserts that the specified value is true. +// +// a.True(myBool) +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return True(a.t, value, msgAndArgs...) +} + +// Truef asserts that the specified value is true. 
+// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Truef(a.t, value, msg, args...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// Zero asserts that i is the zero value for its type. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Zero(a.t, i, msgAndArgs...) +} + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Zerof(a.t, i, msg, args...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl new file mode 100644 index 0000000..188bb9e --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl @@ -0,0 +1,5 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { + if h, ok := a.t.(tHelper); ok { h.Helper() } + return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 0000000..5bdec56 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,1394 @@ +package assert + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful +// for table driven tests. +type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool + +// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful +// for table driven tests. +type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool + +// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful +// for table driven tests. +type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool + +// ValuesAssertionFunc is a common function prototype when validating an error value. Can be useful +// for table driven tests. 
+type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + if expected == nil || actual == nil { + return expected == actual + } + + exp, ok := expected.([]byte) + if !ok { + return reflect.DeepEqual(expected, actual) + } + + act, ok := actual.([]byte) + if !ok { + return false + } + if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. +func ObjectsAreEqualValues(expected, actual interface{}) bool { + if ObjectsAreEqual(expected, actual) { + return true + } + + actualType := reflect.TypeOf(actual) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + } + + return false +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occurred in calling code.*/ + +// CallerInfo returns an array of strings containing the file and line number +// of each stack frame leading from the current test to the assert call that +// failed. +func CallerInfo() []string { + + pc := uintptr(0) + file := "" + line := 0 + ok := false + name := "" + + callers := []string{} + for i := 0; ; i++ { + pc, file, line, ok = runtime.Caller(i) + if !ok { + // The breaks below failed to terminate the loop, and we ran off the + // end of the call stack. + break + } + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } + + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + if len(parts) > 1 { + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + segments := strings.Split(name, ".") + name = segments[len(segments)-1] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + } + + return callers +} + +// Stolen from the `go test` tool. +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. 
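Note: the ComparisonAssertionFunc, ValueAssertionFunc, BoolAssertionFunc and ErrorAssertionFunc types declared above exist so that a table-driven test can store the expected assertion alongside each row. A minimal sketch of that pattern, assuming a caller that imports the vendored package (the test name, rows and inputs are illustrative):

package example_test

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAtoiTable(t *testing.T) {
	tests := []struct {
		name      string
		input     string
		assertion assert.ErrorAssertionFunc
	}{
		{"valid number", "42", assert.NoError},
		{"not a number", "forty-two", assert.Error},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Each row supplies the assertion that should hold for its input.
			_, err := strconv.Atoi(tt.input)
			tt.assertion(t, err)
		})
	}
}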
+func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + return msgAndArgs[0].(string) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +// Aligns the provided message so that all lines after the first line start at the same location as the first line. +// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). +// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// basis on which the alignment occurs). +func indentMessageLines(message string, longestLabelLen int) string { + outBuf := new(bytes.Buffer) + + for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + // no need to align first line because it starts at the correct location (after the label) + if i != 0 { + // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab + outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") + } + outBuf.WriteString(scanner.Text()) + } + + return outBuf.String() +} + +type failNower interface { + FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + Fail(t, failureMessage, msgAndArgs...) + + // We cannot extend TestingT with FailNow() and + // maintain backwards compatibility, so we fallback + // to panicking when FailNow is not available in + // TestingT. + // See issue #263 + + if t, ok := t.(failNower); ok { + t.FailNow() + } else { + panic("test failed and t is missing `FailNow()`") + } + return false +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + content := []labeledContent{ + {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, + {"Error", failureMessage}, + } + + // Add test name if the Go version supports it + if n, ok := t.(interface { + Name() string + }); ok { + content = append(content, labeledContent{"Test", n.Name()}) + } + + message := messageFromMsgAndArgs(msgAndArgs...) + if len(message) > 0 { + content = append(content, labeledContent{"Messages", message}) + } + + t.Errorf("\n%s", ""+labeledOutput(content...)) + + return false +} + +type labeledContent struct { + label string + content string +} + +// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: +// +// \t{{label}}:{{align_spaces}}\t{{content}}\n +// +// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. +// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this +// alignment is achieved, "\t{{content}}\n" is added for the output. +// +// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. 
+func labeledOutput(content ...labeledContent) string { + longestLabel := 0 + for _, v := range content { + if len(v.label) > longestLabel { + longestLabel = len(v.label) + } + } + var output string + for _, v := range content { + output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" + } + return output +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) + } + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) + } + + return true +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", + expected, actual, err), msgAndArgs...) + } + + if !ObjectsAreEqual(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// formatUnequalValues takes two values of arbitrary types and returns string +// representations appropriate to be presented to the user. +// +// If the values are not of like type, the returned strings will be prefixed +// with the type name, and the value will be enclosed in parenthesis similar +// to a type conversion in the Go grammar. +func formatUnequalValues(expected, actual interface{}) (e string, a string) { + if reflect.TypeOf(expected) != reflect.TypeOf(actual) { + return fmt.Sprintf("%T(%#v)", expected, expected), + fmt.Sprintf("%T(%#v)", actual, actual) + } + + return fmt.Sprintf("%#v", expected), + fmt.Sprintf("%#v", actual) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. 
+// +// assert.EqualValues(t, uint32(123), int32(123)) +func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if !ObjectsAreEqualValues(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// Exactly asserts that two objects are equal in value and type. +// +// assert.Exactly(t, int32(123), int64(123)) +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) + } + + return Equal(t, expected, actual, msgAndArgs...) + +} + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err) +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !isNil(object) { + return true + } + return Fail(t, "Expected value not to be nil.", msgAndArgs...) +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err) +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) +} + +// isEmpty gets whether the specified object is considered empty or not. +func isEmpty(object interface{}) bool { + + // get nil case out of the way + if object == nil { + return true + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + // collection types are empty when they have no element + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // pointers are empty if nil or if the value they point to is empty + case reflect.Ptr: + if objValue.IsNil() { + return true + } + deref := objValue.Elem().Interface() + return isEmpty(deref) + // for all other types, compare against the zero value + default: + zero := reflect.Zero(objValue.Type()) + return reflect.DeepEqual(object, zero.Interface()) + } +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + pass := isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. 
+// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + pass := !isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// getLen try to get length of object. +// return (false, 0) if impossible. +func getLen(x interface{}) (ok bool, length int) { + v := reflect.ValueOf(x) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + return true, v.Len() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + ok, l := getLen(object) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + } + + if l != length { + return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + } + return true +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool) +func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if h, ok := t.(interface { + Helper() + }); ok { + h.Helper() + } + + if value != true { + return Fail(t, "Should be true", msgAndArgs...) + } + + return true + +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool) +func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if value != false { + return Fail(t, "Should be false", msgAndArgs...) + } + + return true + +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", + expected, actual, err), msgAndArgs...) + } + + if ObjectsAreEqual(expected, actual) { + return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) + } + + return true + +} + +// containsElement try loop over the list check if the list includes the element. +// return (false, false) if impossible. +// return (true, false) if element was not found. +// return (true, true) if element was found. 
+func includeElement(list interface{}, element interface{}) (ok, found bool) { + + listValue := reflect.ValueOf(list) + elementValue := reflect.ValueOf(element) + defer func() { + if e := recover(); e != nil { + ok = false + found = false + } + }() + + if reflect.TypeOf(list).Kind() == reflect.String { + return true, strings.Contains(listValue.String(), elementValue.String()) + } + + if reflect.TypeOf(list).Kind() == reflect.Map { + mapKeys := listValue.MapKeys() + for i := 0; i < len(mapKeys); i++ { + if ObjectsAreEqual(mapKeys[i].Interface(), element) { + return true, true + } + } + return true, false + } + + for i := 0; i < listValue.Len(); i++ { + if ObjectsAreEqual(listValue.Index(i).Interface(), element) { + return true, true + } + } + return true, false + +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") +func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if subset == nil { + return true // we consider nil to be equal to the nil set + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) 
+ } + } + + return true +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if subset == nil { + return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return true + } + + aKind := reflect.TypeOf(listA).Kind() + bKind := reflect.TypeOf(listB).Kind() + + if aKind != reflect.Array && aKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) + } + + if bKind != reflect.Array && bKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) + } + + aValue := reflect.ValueOf(listA) + bValue := reflect.ValueOf(listB) + + aLen := aValue.Len() + bLen := bValue.Len() + + if aLen != bLen { + return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) + } + + // Mark indexes in bValue that we already used + visited := make([]bool, bLen) + for i := 0; i < aLen; i++ { + element := aValue.Index(i).Interface() + found := false + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + if ObjectsAreEqual(bValue.Index(j).Interface(), element) { + visited[j] = true + found = true + break + } + } + if !found { + return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) + } + } + + return true +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) 
+ } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ GoCrazy() }) +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) + } + + return true +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + funcDidPanic, panicValue := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) + } + if panicValue != expected { + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ RemainCalm() }) +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + case time.Duration: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. 
+// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) + if !result { + return result + } + } + + return true +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Map || + reflect.TypeOf(expected).Kind() != reflect.Map { + return Fail(t, "Arguments must be maps", msgAndArgs...) + } + + expectedMap := reflect.ValueOf(expected) + actualMap := reflect.ValueOf(actual) + + if expectedMap.Len() != actualMap.Len() { + return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) + } + + for _, k := range expectedMap.MapKeys() { + ev := expectedMap.MapIndex(k) + av := actualMap.MapIndex(k) + + if !ev.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) + } + + if !av.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) 
+ } + + if !InDelta( + t, + ev.Interface(), + av.Interface(), + delta, + msgAndArgs..., + ) { + return false + } + } + + return true +} + +func calcRelativeError(expected, actual interface{}) (float64, error) { + af, aok := toFloat(expected) + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + } + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") + } + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) + } + + return math.Abs(af-bf) / math.Abs(af), nil +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) + } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) + } + + return true +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err != nil { + return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) + } + + return true +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if err == nil { + return Fail(t, "An error is expected but got nil.", msgAndArgs...) + } + + return true +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) 
{ + return false + } + expected := errString + actual := theError.Error() + // don't need to use deep equals here, we know they are both strings + if expected != actual { + return Fail(t, fmt.Sprintf("Error message not equal:\n"+ + "expected: %q\n"+ + "actual : %q", expected, actual), msgAndArgs...) + } + return true +} + +// matchRegexp return true if a specified regexp matches a string. +func matchRegexp(rx interface{}, str interface{}) bool { + + var r *regexp.Regexp + if rr, ok := rx.(*regexp.Regexp); ok { + r = rr + } else { + r = regexp.MustCompile(fmt.Sprint(rx)) + } + + return (r.FindStringIndex(fmt.Sprint(str)) != nil) + +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + match := matchRegexp(rx, str) + + if !match { + Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) + } + + return match +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) + } + + return !match + +} + +// Zero asserts that i is the zero value for its type. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// NotZero asserts that i is not the zero value for its type. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) + } + return true +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) 
+ } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if !info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) + } + return true +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + var expectedJSONAsInterface, actualJSONAsInterface interface{} + + if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { + return "" + } + + var e, a string + if ek != reflect.String { + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) + } else { + e = expected.(string) + a = actual.(string) + } + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} + +// validateEqualArgs checks whether provided arguments can be safely used in the +// Equal/NotEqual functions. +func validateEqualArgs(expected, actual interface{}) error { + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + +func isFunction(arg interface{}) bool { + if arg == nil { + return false + } + return reflect.TypeOf(arg).Kind() == reflect.Func +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +type tHelper interface { + Helper() +} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 0000000..c9dccc4 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
+// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 0000000..ac9dc9d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 0000000..9ad5685 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,16 @@ +package assert + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 0000000..df46fa7 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,143 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 and +// an error if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url, nil) + if err != nil { + return -1, err + } + req.URL.RawQuery = values.Encode() + handler(w, req) + return w.Code, nil +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). 
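Note: the HTTP helpers in this file build the request themselves (httptest.NewRecorder plus http.NewRequest, with the values encoded into the query string), so a bare handler function is enough to exercise them. A small sketch under that assumption; the handler name, route and query values below are invented for illustration.

package example_test

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// helloHandler echoes the "name" query parameter and returns 400 when it is missing.
func helloHandler(w http.ResponseWriter, r *http.Request) {
	name := r.URL.Query().Get("name")
	if name == "" {
		http.Error(w, "missing name", http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "hello %s", name)
}

func TestHelloHandler(t *testing.T) {
	params := url.Values{"name": []string{"World"}}

	assert.HTTPSuccess(t, helloHandler, "GET", "/hello", params)
	assert.HTTPBodyContains(t, helloHandler, "GET", "/hello", params, "hello World")
	assert.HTTPError(t, helloHandler, "GET", "/hello", nil) // no "name" parameter, so the handler returns 400
}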
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent + if !isSuccessCode { + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isSuccessCode +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + if !isRedirectCode { + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isRedirectCode +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isErrorCode := code >= http.StatusBadRequest + if !isErrorCode { + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isErrorCode +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if !contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return contains +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. 
+// +// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return !contains +} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 0000000..7324128 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,44 @@ +// Package mock provides a system by which it is possible to mock your objects +// and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) method, and return the appropriate values. +// +// For example, to mock a method that saves the name and age of a person and returns +// the year of their birth or an error, you might write this: +// +// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) { +// args := o.Called(firstname, lastname, age) +// return args.Int(0), args.Error(1) +// } +// +// The Int, Error and Bool methods are examples of strongly typed getters that take the argument +// index position. Given this argument list: +// +// (12, true, "Something") +// +// You could read them out strongly typed like this: +// +// args.Int(0) +// args.Bool(1) +// args.String(2) +// +// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion: +// +// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine) +// +// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those +// cases you should check for nil first. +package mock diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go new file mode 100644 index 0000000..cc4f642 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -0,0 +1,885 @@ +package mock + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() +} + +/* + Call +*/ + +// Call represents a method call and is used for setting expectations, +// as well as recording activity. +type Call struct { + Parent *Mock + + // The name of the method that was or will be called. + Method string + + // Holds the arguments of the method. 
+ Arguments Arguments + + // Holds the arguments that should be returned when + // this method is called. + ReturnArguments Arguments + + // Holds the caller info for the On() call + callerInfo []string + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Amount of times this call has been called + totalCalls int + + // Call to this method can be optional + optional bool + + // Holds a channel that will be used to block the Return until it either + // receives a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + waitTime time.Duration + + // Holds a handler used to manipulate arguments content that are passed by + // reference. It's useful when mocking methods such as unmarshalers or + // decoders. + RunFn func(Arguments) +} + +func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { + return &Call{ + Parent: parent, + Method: methodName, + Arguments: methodArguments, + ReturnArguments: make([]interface{}, 0), + callerInfo: callerInfo, + Repeatability: 0, + WaitFor: nil, + RunFn: nil, + } +} + +func (c *Call) lock() { + c.Parent.mutex.Lock() +} + +func (c *Call) unlock() { + c.Parent.mutex.Unlock() +} + +// Return specifies the return arguments for the expectation. +// +// Mock.On("DoSomething").Return(errors.New("failed")) +func (c *Call) Return(returnArguments ...interface{}) *Call { + c.lock() + defer c.unlock() + + c.ReturnArguments = returnArguments + + return c +} + +// Once indicates that that the mock should only return the value once. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() +func (c *Call) Once() *Call { + return c.Times(1) +} + +// Twice indicates that that the mock should only return the value twice. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() +func (c *Call) Twice() *Call { + return c.Times(2) +} + +// Times indicates that that the mock should only return the indicated number +// of times. +// +// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) +func (c *Call) Times(i int) *Call { + c.lock() + defer c.unlock() + c.Repeatability = i + return c +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. +// +// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second)) +func (c *Call) WaitUntil(w <-chan time.Time) *Call { + c.lock() + defer c.unlock() + c.WaitFor = w + return c +} + +// After sets how long to block until the call returns +// +// Mock.On("MyMethod", arg1, arg2).After(time.Second) +func (c *Call) After(d time.Duration) *Call { + c.lock() + defer c.unlock() + c.waitTime = d + return c +} + +// Run sets a handler to be called before returning. It can be used when +// mocking a method such as unmarshalers that takes a pointer to a struct and +// sets properties in such struct +// +// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) { +// arg := args.Get(0).(*map[string]interface{}) +// arg["foo"] = "bar" +// }) +func (c *Call) Run(fn func(args Arguments)) *Call { + c.lock() + defer c.unlock() + c.RunFn = fn + return c +} + +// Maybe allows the method call to be optional. 
Not calling an optional method +// will not cause an error while asserting expectations +func (c *Call) Maybe() *Call { + c.lock() + defer c.unlock() + c.optional = true + return c +} + +// On chains a new expectation description onto the mocked interface. This +// allows syntax like. +// +// Mock. +// On("MyMethod", 1).Return(nil). +// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error")) +func (c *Call) On(methodName string, arguments ...interface{}) *Call { + return c.Parent.On(methodName, arguments...) +} + +// Mock is the workhorse used to track activity on another object. +// For an example of its usage, refer to the "Example Usage" section at the top +// of this document. +type Mock struct { + // Represents the calls that are expected of + // an object. + ExpectedCalls []*Call + + // Holds the calls that were made to this mocked object. + Calls []Call + + // test is An optional variable that holds the test struct, to be used when an + // invalid mock call was made. + test TestingT + + // TestData holds any data that might be useful for testing. Testify ignores + // this data completely allowing you to do whatever you like with it. + testData objx.Map + + mutex sync.Mutex +} + +// TestData holds any data that might be useful for testing. Testify ignores +// this data completely allowing you to do whatever you like with it. +func (m *Mock) TestData() objx.Map { + + if m.testData == nil { + m.testData = make(objx.Map) + } + + return m.testData +} + +/* + Setting expectations +*/ + +// Test sets the test struct variable of the mock object +func (m *Mock) Test(t TestingT) { + m.mutex.Lock() + defer m.mutex.Unlock() + m.test = t +} + +// fail fails the current test with the given formatted format and args. +// In case that a test was defined, it uses the test APIs for failing a test, +// otherwise it uses panic. +func (m *Mock) fail(format string, args ...interface{}) { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.test == nil { + panic(fmt.Sprintf(format, args...)) + } + m.test.Errorf(format, args...) + m.test.FailNow() +} + +// On starts a description of an expectation of the specified method +// being called. +// +// Mock.On("MyMethod", arg1, arg2) +func (m *Mock) On(methodName string, arguments ...interface{}) *Call { + for _, arg := range arguments { + if v := reflect.ValueOf(arg); v.Kind() == reflect.Func { + panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg)) + } + } + + m.mutex.Lock() + defer m.mutex.Unlock() + c := newCall(m, methodName, assert.CallerInfo(), arguments...) 
+ m.ExpectedCalls = append(m.ExpectedCalls, c) + return c +} + +// /* +// Recording and responding to activity +// */ + +func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { + for i, call := range m.ExpectedCalls { + if call.Method == method && call.Repeatability > -1 { + + _, diffCount := call.Arguments.Diff(arguments) + if diffCount == 0 { + return i, call + } + + } + } + return -1, nil +} + +func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { + var diffCount int + var closestCall *Call + var err string + + for _, call := range m.expectedCalls() { + if call.Method == method { + + errInfo, tempDiffCount := call.Arguments.Diff(arguments) + if tempDiffCount < diffCount || diffCount == 0 { + diffCount = tempDiffCount + closestCall = call + err = errInfo + } + + } + } + + return closestCall, err +} + +func callString(method string, arguments Arguments, includeArgumentValues bool) string { + + var argValsString string + if includeArgumentValues { + var argVals []string + for argIndex, arg := range arguments { + argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg)) + } + argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t")) + } + + return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString) +} + +// Called tells the mock object that a method has been called, and gets an array +// of arguments to return. Panics if the call is unexpected (i.e. not preceded by +// appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) Called(arguments ...interface{}) Arguments { + // get the calling function's name + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Couldn't get the caller information") + } + functionPath := runtime.FuncForPC(pc).Name() + //Next four lines are required to use GCCGO function naming conventions. + //For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock + //uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree + //With GCCGO we need to remove interface information starting from pN
. + re := regexp.MustCompile("\\.pN\\d+_") + if re.MatchString(functionPath) { + functionPath = re.Split(functionPath, -1)[0] + } + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + return m.MethodCalled(functionName, arguments...) +} + +// MethodCalled tells the mock object that the given method has been called, and gets +// an array of arguments to return. Panics if the call is unexpected (i.e. not preceded +// by appropriate .On .Return() calls) +// If Call.WaitFor is set, blocks until the channel is closed or receives a message. +func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Arguments { + m.mutex.Lock() + //TODO: could combine expected and closes in single loop + found, call := m.findExpectedCall(methodName, arguments...) + + if found < 0 { + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + + closestCall, mismatch := m.findClosestCall(methodName, arguments...) + m.mutex.Unlock() + + if closestCall != nil { + m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", + callString(methodName, arguments, true), + callString(methodName, closestCall.Arguments, true), + diffArguments(closestCall.Arguments, arguments), + strings.TrimSpace(mismatch), + ) + } else { + m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } + } + + if call.Repeatability == 1 { + call.Repeatability = -1 + } else if call.Repeatability > 1 { + call.Repeatability-- + } + call.totalCalls++ + + // add the call + m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) + m.mutex.Unlock() + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } else { + time.Sleep(call.waitTime) + } + + m.mutex.Lock() + runFn := call.RunFn + m.mutex.Unlock() + + if runFn != nil { + runFn(arguments) + } + + m.mutex.Lock() + returnArgs := call.ReturnArguments + m.mutex.Unlock() + + return returnArgs +} + +/* + Assertions +*/ + +type assertExpectationser interface { + AssertExpectations(TestingT) bool +} + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + for _, obj := range testObjects { + if m, ok := obj.(Mock); ok { + t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") + obj = &m + } + m := obj.(assertExpectationser) + if !m.AssertExpectations(t) { + t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) + return false + } + } + return true +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. 
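Editor's note: putting the pieces above together, a typical test defines a type that embeds Mock, records invocations with Called from inside its methods, and verifies them with AssertExpectations. A small sketch (Mailer, MockMailer and TestNotify are hypothetical names, not defined in this repository):

    package myapp_test

    import (
        "testing"

        "github.com/stretchr/testify/mock"
    )

    // Mailer is a hypothetical interface the code under test depends on.
    type Mailer interface {
        Send(to, body string) error
    }

    // MockMailer wires the interface to mock.Mock, as described in the
    // package documentation above.
    type MockMailer struct {
        mock.Mock
    }

    func (m *MockMailer) Send(to, body string) error {
        args := m.Called(to, body)
        return args.Error(0)
    }

    func TestNotify(t *testing.T) {
        mailer := new(MockMailer)

        // Expect exactly one Send call with these arguments, returning no error.
        mailer.On("Send", "ops@example.com", "ledger closed").Return(nil).Once()

        // The code under test would normally make this call; invoked directly here.
        if err := mailer.Send("ops@example.com", "ledger closed"); err != nil {
            t.Fatalf("unexpected error: %v", err)
        }

        // Fails the test if any expectation set with On was not met.
        mailer.AssertExpectations(t)
    }
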
+func (m *Mock) AssertExpectations(t TestingT) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + var somethingMissing bool + var failedExpectations int + + // iterate through each expectation + expectedCalls := m.expectedCalls() + for _, expectedCall := range expectedCalls { + if !expectedCall.optional && !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) + } else { + if expectedCall.Repeatability > 0 { + somethingMissing = true + failedExpectations++ + t.Logf("FAIL:\t%s(%s)\n\t\tat: %s", expectedCall.Method, expectedCall.Arguments.String(), expectedCall.callerInfo) + } else { + t.Logf("PASS:\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) + } + + return !somethingMissing +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. +func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + var actualCalls int + for _, call := range m.calls() { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if !m.methodWasCalled(methodName, arguments) { + var calledWithArgs []string + for _, call := range m.calls() { + calledWithArgs = append(calledWithArgs, fmt.Sprintf("%v", call.Arguments)) + } + if len(calledWithArgs) == 0 { + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut no actual calls happened", methodName, arguments)) + } + return assert.Fail(t, "Should have called with given arguments", + fmt.Sprintf("Expected %q to have been called with:\n%v\nbut actual calls were:\n %v", methodName, arguments, strings.Join(calledWithArgs, "\n"))) + } + return true +} + +// AssertNotCalled asserts that the method was not called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. 
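Editor's note: beyond the blanket AssertExpectations check, the per-method assertions let a test verify call counts and argument values after the fact. A minimal sketch driving a bare Mock through MethodCalled (the method names are made up for illustration; a real mock object would call Called from inside its own methods):

    package myapp_test

    import (
        "testing"

        "github.com/stretchr/testify/mock"
    )

    func TestCallCountAssertions(t *testing.T) {
        var m mock.Mock

        // Repeatability 0: Ping may be called any number of times.
        m.On("Ping").Return(true)

        m.MethodCalled("Ping")
        m.MethodCalled("Ping")

        m.AssertCalled(t, "Ping")
        m.AssertNumberOfCalls(t, "Ping", 2)
        m.AssertNotCalled(t, "Shutdown")
    }
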
+func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + m.mutex.Lock() + defer m.mutex.Unlock() + if m.methodWasCalled(methodName, arguments) { + return assert.Fail(t, "Should not have called with given arguments", + fmt.Sprintf("Expected %q to not have been called with:\n%v\nbut actually it was.", methodName, arguments)) + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.calls() { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +func (m *Mock) expectedCalls() []*Call { + return append([]*Call{}, m.ExpectedCalls...) +} + +func (m *Mock) calls() []Call { + return append([]Call{}, m.Calls...) +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. +type Arguments []interface{} + +const ( + // Anything is used in Diff and Assert when the argument being tested + // shouldn't be taken into consideration. + Anything = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// argumentMatcher performs custom argument matching, returning whether or +// not the argument is matched by the expectation fixture function. +type argumentMatcher struct { + // fn is a function which accepts one argument, and returns a bool. + fn reflect.Value +} + +func (f argumentMatcher) Matches(argument interface{}) bool { + expectType := f.fn.Type().In(0) + expectTypeNilSupported := false + switch expectType.Kind() { + case reflect.Interface, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Ptr: + expectTypeNilSupported = true + } + + argType := reflect.TypeOf(argument) + var arg reflect.Value + if argType == nil { + arg = reflect.New(expectType).Elem() + } else { + arg = reflect.ValueOf(argument) + } + + if argType == nil && !expectTypeNilSupported { + panic(errors.New("attempting to call matcher with nil for non-nil expected type")) + } + if argType == nil || argType.AssignableTo(expectType) { + result := f.fn.Call([]reflect.Value{arg}) + return result[0].Bool() + } + return false +} + +func (f argumentMatcher) String() string { + return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name()) +} + +// MatchedBy can be used to match a mock call based on only certain properties +// from a complex struct or some calculation. It takes a function that will be +// evaluated with the called argument and will return true when there's a match +// and false otherwise. +// +// Example: +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// |fn|, must be a function accepting a single argument (of the expected type) +// which returns a bool. If |fn| doesn't match the required signature, +// MatchedBy() panics. 
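Editor's note: MatchedBy is useful when only part of an argument matters to an expectation. A sketch under that assumption (Payment is a hypothetical type, not something defined in this repository):

    package myapp_test

    import (
        "testing"

        "github.com/stretchr/testify/mock"
    )

    // Payment is a hypothetical argument type used only for this sketch.
    type Payment struct {
        Amount   int
        Currency string
    }

    func TestMatchedBy(t *testing.T) {
        var m mock.Mock

        // Match any Payment over 1000, regardless of its other fields.
        m.On("Approve", mock.MatchedBy(func(p Payment) bool {
            return p.Amount > 1000
        })).Return(true)

        args := m.MethodCalled("Approve", Payment{Amount: 2500, Currency: "USD"})
        if !args.Bool(0) {
            t.Fatal("expected approval")
        }
    }
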
+func MatchedBy(fn interface{}) argumentMatcher { + fnType := reflect.TypeOf(fn) + + if fnType.Kind() != reflect.Func { + panic(fmt.Sprintf("assert: arguments: %s is not a func", fn)) + } + if fnType.NumIn() != 1 { + panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn)) + } + if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool { + panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn)) + } + + return argumentMatcher{fn: reflect.ValueOf(fn)} +} + +// Get Returns the argument at the specified index. +func (args Arguments) Get(index int) interface{} { + if index+1 > len(args) { + panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args))) + } + return args[index] +} + +// Is gets whether the objects match the arguments specified. +func (args Arguments) Is(objects ...interface{}) bool { + for i, obj := range args { + if obj != objects[i] { + return false + } + } + return true +} + +// Diff gets a string describing the differences between the arguments +// and the specified objects. +// +// Returns the diff string and number of differences found. +func (args Arguments) Diff(objects []interface{}) (string, int) { + //TODO: could return string as error and nil for No difference + + var output = "\n" + var differences int + + var maxArgCount = len(args) + if len(objects) > maxArgCount { + maxArgCount = len(objects) + } + + for i := 0; i < maxArgCount; i++ { + var actual, expected interface{} + var actualFmt, expectedFmt string + + if len(objects) <= i { + actual = "(Missing)" + actualFmt = "(Missing)" + } else { + actual = objects[i] + actualFmt = fmt.Sprintf("(%[1]T=%[1]v)", actual) + } + + if len(args) <= i { + expected = "(Missing)" + expectedFmt = "(Missing)" + } else { + expected = args[i] + expectedFmt = fmt.Sprintf("(%[1]T=%[1]v)", expected) + } + + if matcher, ok := expected.(argumentMatcher); ok { + if matcher.Matches(actual) { + output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher) + } else { + differences++ + output = fmt.Sprintf("%s\t%d: PASS: %s not matched by %s\n", output, i, actualFmt, matcher) + } + } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { + + // type checking + if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + + } else { + + // normal checking + + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) + } + } + + } + + if differences == 0 { + return "No differences.", differences + } + + return output, differences + +} + +// Assert compares the arguments with the specified objects and fails if +// they do not exactly match. +func (args Arguments) Assert(t TestingT, objects ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + // get the differences + diff, diffCount := args.Diff(objects) + + if diffCount == 0 { + return true + } + + // there are differences... report them... 
+ t.Logf(diff) + t.Errorf("%sArguments do not match.", assert.CallerInfo()) + + return false + +} + +// String gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +// +// If no index is provided, String() returns a complete string representation +// of the arguments. +func (args Arguments) String(indexOrNil ...int) string { + + if len(indexOrNil) == 0 { + // normal String() method - return a string representation of the args + var argsStr []string + for _, arg := range args { + argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg))) + } + return strings.Join(argsStr, ",") + } else if len(indexOrNil) == 1 { + // Index has been specified - get the argument at that index + var index = indexOrNil[0] + var s string + var ok bool + if s, ok = args.Get(index).(string); !ok { + panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index))) + } + return s + } + + panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil))) + +} + +// Int gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +func diffArguments(expected Arguments, actual Arguments) string { + if len(expected) != len(actual) { + return fmt.Sprintf("Provided %v arguments, mocked for %v arguments", len(expected), len(actual)) + } + + for x := range expected { + if diffString := diff(expected[x], actual[x]); diffString != "" { + return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString) + } + } + + return "" +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. 
+func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return diff +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +type tHelper interface { + Helper() +} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 0000000..fc31160 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 0000000..aeb73f8 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,295 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. 
+type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. +var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. 
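Editor's note: taken together, the exported entry points above cover the normal lifecycle of a password hash: create it once, verify it on each login, and read back its cost when deciding whether to rehash. A small, self-contained sketch using only functions defined in this file:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/crypto/bcrypt"
    )

    func main() {
        password := []byte("correct horse battery staple")

        // Hash at the package default cost (10); the salt is generated internally.
        hash, err := bcrypt.GenerateFromPassword(password, bcrypt.DefaultCost)
        if err != nil {
            log.Fatal(err)
        }

        // Verify a login attempt: nil means the password matches the stored hash.
        if err := bcrypt.CompareHashAndPassword(hash, password); err != nil {
            log.Fatal("password mismatch: ", err)
        }

        // Cost recovers the work factor so stored hashes can be upgraded later.
        cost, err := bcrypt.Cost(hash)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("stored hash uses cost %d\n", cost)
    }
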
+func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). + p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + // We copy the key to prevent changing the underlying array. + ckey := append(key[:len(key):len(key)], 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n++ + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n++ + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. 
+func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 0000000..9d80f19 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. 
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= 
((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 0000000..2641dad --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,91 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. +const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. +func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). 
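Editor's note: as the Encrypt documentation above points out, the block primitives should be driven through a mode from crypto/cipher rather than called block by block. A sketch of CBC encryption with a random IV prepended to the ciphertext (the key and message are illustrative only; real code needs proper padding and, where possible, an authenticated construction):

    package main

    import (
        "crypto/cipher"
        "crypto/rand"
        "fmt"
        "io"
        "log"

        "golang.org/x/crypto/blowfish"
    )

    func main() {
        key := []byte("an example very very secret key")

        // *blowfish.Cipher satisfies cipher.Block via BlockSize/Encrypt/Decrypt.
        block, err := blowfish.NewCipher(key)
        if err != nil {
            log.Fatal(err)
        }

        // Plaintext length must be a multiple of the 8-byte block size;
        // a real application would apply padding such as PKCS#7.
        plaintext := []byte("16 byte message!")

        ciphertext := make([]byte, blowfish.BlockSize+len(plaintext))
        iv := ciphertext[:blowfish.BlockSize]
        if _, err := io.ReadFull(rand.Reader, iv); err != nil {
            log.Fatal(err)
        }

        mode := cipher.NewCBCEncrypter(block, iv)
        mode.CryptBlocks(ciphertext[blowfish.BlockSize:], plaintext)

        fmt.Printf("%x\n", ciphertext)
    }
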
+func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 0000000..d040775 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// https://www.schneier.com/code/constants.txt. + +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 
0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 
0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 
0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/vendor.json b/vendor/vendor.json new file mode 100644 index 0000000..281f782 --- /dev/null +++ b/vendor/vendor.json @@ -0,0 +1,106 @@ +{ + "comment": "", + "ignore": "test", + "package": [ + { + "checksumSHA1": "TgO9gc7JIrSnT/T/VVzihTVPtvU=", + "path": "github.com/Masterminds/semver", + "revision": "c84ddcca87bf5a941b138dde832a7e20b0159ad8", + "revisionTime": "2018-08-07T14:24:31Z" + }, + { + "checksumSHA1": "KMTpIQS+cdSCzMN5TC6GfNLTv0o=", + "path": "github.com/ant0ine/go-json-rest/rest", + "revision": "ebb33769ae013bd5f518a8bac348c310dea768b8", + "revisionTime": "2017-09-13T04:12:08Z" + }, + { + "checksumSHA1": "sXuwczp4nGdlq20zJoGHmA8IYPg=", + "path": "github.com/ant0ine/go-json-rest/rest/trie", + "revision": "ebb33769ae013bd5f518a8bac348c310dea768b8", + "revisionTime": "2017-09-13T04:12:08Z" + }, + { + "checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=", + "origin": "github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew", + "path": "github.com/davecgh/go-spew/spew", + "revision": "f35b8ab0b5a2cef36673838d662e249dd9c94686", + "revisionTime": "2018-05-06T18:05:49Z" + }, + { + "checksumSHA1": "Z6rD6bmF8InGLXM7EHr8TMxMXNY=", + "path": "github.com/go-sql-driver/mysql", + "revision": "9181e3a86a19bacd63e68d43ae8b7b36320d8092", + "revisionTime": "2017-12-04T00:43:26Z" + }, + { + "checksumSHA1": "QdSIO+gp41BqNti0ZDVdXvfDvDo=", + "path": "github.com/gorilla/websocket", + "revision": "5ed622c449da6d44c3c8329331ff47a9e5844f71", + "revisionTime": "2018-06-05T20:25:52Z" + }, + { + "checksumSHA1": "ewGq4nGalpCQOHcmBTdAEQx1wW0=", + "path": "github.com/mitchellh/mapstructure", + "revision": "bb74f1db0675b241733089d5a1faa5dd8b0ef57b", + "revisionTime": "2018-05-11T14:21:26Z" + }, + { + "checksumSHA1": "LuFv4/jlrmFNnDb/5SCSEPAM9vU=", + "origin": "github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib", + "path": "github.com/pmezard/go-difflib/difflib", + "revision": "f35b8ab0b5a2cef36673838d662e249dd9c94686", + "revisionTime": "2018-05-06T18:05:49Z" + }, + { + "checksumSHA1": "RU9EFdqqxt1/x+PgsFVURN1J15U=", + "path": "github.com/sendgrid/rest", + "revision": "875828e14d98f3d9178936029d41bd0b832e9c49", + "revisionTime": "2018-09-05T23:40:47Z" + }, + { + "checksumSHA1": "I1WvjGTqwostzlkBqFzT/A8fPRw=", + "path": "github.com/sendgrid/sendgrid-go", + "revision": "8cb43f4ca4f5a26bb75861eaf5ee58c3302eacb1", + "revisionTime": "2018-09-05T23:35:24Z" + }, + { + "checksumSHA1": "FgS1K2JQNPD9NrRVDEtGMKk997k=", + "path": "github.com/sendgrid/sendgrid-go/helpers/mail", + "revision": "8cb43f4ca4f5a26bb75861eaf5ee58c3302eacb1", + "revisionTime": "2018-09-05T23:35:24Z" + }, + { + "checksumSHA1": "F7CERINyYGUQGpVUt5vF/jMTwTY=", + "origin": "github.com/stretchr/testify/vendor/github.com/stretchr/objx", + "path": "github.com/stretchr/objx", + "revision": "f35b8ab0b5a2cef36673838d662e249dd9c94686", + "revisionTime": "2018-05-06T18:05:49Z" + }, + { + "checksumSHA1": "c6pbpF7eowwO59phRTpF8cQ80Z0=", + "path": "github.com/stretchr/testify/assert", + "revision": "f35b8ab0b5a2cef36673838d662e249dd9c94686", + "revisionTime": "2018-05-06T18:05:49Z" 
+ }, + { + "checksumSHA1": "WmBSFdqpdYRIkp0I408JIZ3LDMY=", + "path": "github.com/stretchr/testify/mock", + "revision": "f35b8ab0b5a2cef36673838d662e249dd9c94686", + "revisionTime": "2018-05-06T18:05:49Z" + }, + { + "checksumSHA1": "oCH3J96RWvO8W4xjix47PModpio=", + "path": "golang.org/x/crypto/bcrypt", + "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8", + "revisionTime": "2017-11-25T19:00:56Z" + }, + { + "checksumSHA1": "oVPHWesOmZ02vLq2fglGvf+AMgk=", + "path": "golang.org/x/crypto/blowfish", + "revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8", + "revisionTime": "2017-11-25T19:00:56Z" + } + ], + "rootPath": "github.com/openaccounting/oa-server" +}
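
The vendored blowfish package above exposes a plain 8-byte block cipher: NewCipher expands a 1-56 byte key into the P-array and S-boxes seeded from the pi-derived constants in const.go, and Encrypt/Decrypt each transform exactly one block. It is vendored here because golang.org/x/crypto/bcrypt, also listed in vendor.json at the same revision, builds its password hashing on Blowfish. Below is a minimal sketch of driving the block cipher directly; the key and message bytes are illustrative only and not taken from this repository.

package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	// NewCipher accepts a key of 1 to 56 bytes and expands it into
	// the cipher's P-array and S-boxes, starting from the constants
	// defined in const.go. The key below is purely illustrative.
	c, err := blowfish.NewCipher([]byte("illustrative key"))
	if err != nil {
		panic(err)
	}

	// Encrypt and Decrypt operate on exactly one 8-byte block
	// (blowfish.BlockSize); longer messages need a block mode such
	// as CBC from crypto/cipher.
	plaintext := []byte("8 bytes!")
	ciphertext := make([]byte, blowfish.BlockSize)
	c.Encrypt(ciphertext, plaintext)

	recovered := make([]byte, blowfish.BlockSize)
	c.Decrypt(recovered, ciphertext)
	fmt.Printf("%s\n", recovered) // prints "8 bytes!"
}

As far as this diff shows, nothing in the server calls blowfish directly; it appears to be present only as bcrypt's dependency.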