deps: update vendor dependencies for S3-compatible storage

Updates the AWS SDK and removes the Blazer B2 dependency in favor of a
unified S3-compatible approach. Includes configuration examples and documentation.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-07-01 23:07:58 +12:00
parent f99a866e13
commit 6558a09258
277 changed files with 117799 additions and 26 deletions
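
For context on the unified S3-compatible approach: with the AWS SDK for Go v2, any S3-compatible service can be addressed by overriding the client's endpoint. A minimal sketch, assuming an illustrative Backblaze B2 S3-gateway URL (the actual configuration for this change is not shown in this excerpt):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Credentials and region come from the environment or shared config.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Any S3-compatible endpoint works here; the URL below is an example.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("https://s3.us-west-004.backblazeb2.com")
		o.UsePathStyle = true // many S3-compatible services require path-style addressing
	})
	_ = client
}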

vendor/github.com/kurin/blazer/AUTHORS (generated, vendored, new file, 8 lines)

@@ -0,0 +1,8 @@
# This is the list of Blazer authors for copyright purposes.
#
# This does not necessarily list everyone who has contributed code, since in
# some cases, their employer may be the copyright holder. To see the full list
# of contributors, see the revision history in source control.
#
# Tag yourself.
Google LLC

vendor/github.com/kurin/blazer/LICENSE (generated, vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
Copyright 2016, the Blazer authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/kurin/blazer/b2/b2.go (generated, vendored, new file, 653 lines)

@@ -0,0 +1,653 @@
// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package b2 provides a high-level interface to Backblaze's B2 cloud storage
// service.
//
// It is specifically designed to abstract away the Backblaze API details by
// providing familiar Go interfaces, specifically an io.Writer for object
// storage, and an io.Reader for object download. Handling of transient
// errors, including network and authentication timeouts, is transparent.
//
// Methods that perform network requests accept a context.Context argument.
// Callers should use the context's cancellation abilities to end requests
// early, or to provide timeout or deadline guarantees.
//
// This package is in development and may make API changes.
package b2
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"sync"
"time"
)
// Client is a Backblaze B2 client.
type Client struct {
backend beRootInterface
slock sync.Mutex
sWriters map[string]*Writer
sReaders map[string]*Reader
sMethods []methodCounter
opts clientOptions
}
// NewClient creates and returns a new Client with valid B2 service account
// tokens.
func NewClient(ctx context.Context, account, key string, opts ...ClientOption) (*Client, error) {
c := &Client{
backend: &beRoot{
b2i: &b2Root{},
},
sMethods: []methodCounter{
newMethodCounter(time.Minute, time.Second),
newMethodCounter(time.Minute*5, time.Second),
newMethodCounter(time.Hour, time.Minute),
newMethodCounter(0, 0), // forever
},
}
opts = append(opts, client(c))
for _, f := range opts {
f(&c.opts)
}
if err := c.backend.authorizeAccount(ctx, account, key, c.opts); err != nil {
return nil, err
}
return c, nil
}
type clientOptions struct {
client *Client
transport http.RoundTripper
failSomeUploads bool
expireTokens bool
capExceeded bool
apiBase string
userAgents []string
writerOpts []WriterOption
}
// A ClientOption allows callers to adjust various per-client settings.
type ClientOption func(*clientOptions)
// UserAgent sets the User-Agent HTTP header. The default header is
// "blazer/<version>"; the value set here will be prepended to that. This can
// be set multiple times.
//
// A user agent is generally of the form "<product>/<version> (<comments>)".
func UserAgent(agent string) ClientOption {
return func(o *clientOptions) {
o.userAgents = append(o.userAgents, agent)
}
}
// APIBase returns a ClientOption specifying the URL root of API requests.
func APIBase(url string) ClientOption {
return func(o *clientOptions) {
o.apiBase = url
}
}
// Transport sets the underlying HTTP transport mechanism. If unset,
// http.DefaultTransport is used.
func Transport(rt http.RoundTripper) ClientOption {
return func(c *clientOptions) {
c.transport = rt
}
}
// FailSomeUploads requests intermittent upload failures from the B2 service.
// This is mostly useful for testing.
func FailSomeUploads() ClientOption {
return func(c *clientOptions) {
c.failSomeUploads = true
}
}
// ExpireSomeAuthTokens requests intermittent authentication failures from the
// B2 service.
func ExpireSomeAuthTokens() ClientOption {
return func(c *clientOptions) {
c.expireTokens = true
}
}
// ForceCapExceeded requests a cap limit from the B2 service. This causes all
// uploads to be treated as if they would exceed the configured B2 capacity.
func ForceCapExceeded() ClientOption {
return func(c *clientOptions) {
c.capExceeded = true
}
}
func client(cl *Client) ClientOption {
return func(c *clientOptions) {
c.client = cl
}
}
type clientTransport struct {
client *Client
rt http.RoundTripper
}
func (ct *clientTransport) RoundTrip(r *http.Request) (*http.Response, error) {
m := r.Header.Get("X-Blazer-Method")
t := ct.rt
if t == nil {
t = http.DefaultTransport
}
b := time.Now()
resp, err := t.RoundTrip(r)
e := time.Now()
if err != nil {
return resp, err
}
if m != "" && ct.client != nil {
ct.client.slock.Lock()
m := method{
name: m,
duration: e.Sub(b),
status: resp.StatusCode,
}
for _, counter := range ct.client.sMethods {
counter.record(m)
}
ct.client.slock.Unlock()
}
return resp, nil
}
// Bucket is a reference to a B2 bucket.
type Bucket struct {
b beBucketInterface
r beRootInterface
c *Client
urlPool *urlPool
}
type BucketType string
const (
UnknownType BucketType = ""
Private = "allPrivate"
Public = "allPublic"
Snapshot = "snapshot"
)
// BucketAttrs holds a bucket's metadata attributes.
type BucketAttrs struct {
// Type lists or sets the new bucket type. If Type is UnknownType during a
// bucket.Update, the type is not changed.
Type BucketType
// Info records user data, limited to ten keys. If nil during a
// bucket.Update, the existing bucket info is not modified. A bucket's
// metadata can be removed by updating with an empty map.
Info map[string]string
// Reports or sets bucket lifecycle rules. If nil during a bucket.Update,
// the rules are not modified. A bucket's rules can be removed by updating
// with an empty slice.
LifecycleRules []LifecycleRule
}
// A LifecycleRule describes an object's life cycle: how many days after
// upload an object should be hidden, and how many days after being hidden it
// should be deleted. Multiple rules may not apply to the same file or set of
// files. Be careful when using this feature; it can (is designed to) delete
// your data.
type LifecycleRule struct {
// Prefix specifies all the files in the bucket to which this rule applies.
Prefix string
// DaysNewUntilHidden specifies the number of days after which a file
// will automatically be hidden. 0 means "do not automatically hide new
// files".
DaysNewUntilHidden int
// DaysHiddenUntilDeleted specifies the number of days after which a hidden
// file is deleted. 0 means "do not automatically delete hidden files".
DaysHiddenUntilDeleted int
}
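// A hedged usage sketch (editor's illustration, not part of the vendored
// source): a rule that hides objects under "logs/" seven days after upload
// and deletes the hidden versions thirty days later. The bucket name is an
// assumption.
//
//	attrs := &b2.BucketAttrs{
//		Type: b2.Private,
//		LifecycleRules: []b2.LifecycleRule{{
//			Prefix:                 "logs/",
//			DaysNewUntilHidden:     7,
//			DaysHiddenUntilDeleted: 30,
//		}},
//	}
//	bucket, err := client.NewBucket(ctx, "my-bucket", attrs)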
type b2err struct {
err error
notFoundErr bool
isUpdateConflict bool
}
func (e b2err) Error() string {
return e.err.Error()
}
// IsNotExist reports whether a given error indicates that an object or bucket
// does not exist.
func IsNotExist(err error) bool {
berr, ok := err.(b2err)
if !ok {
return false
}
return berr.notFoundErr
}
const uploadURLPoolSize = 100
type urlPool struct {
ch chan beURLInterface
}
func newURLPool() *urlPool {
return &urlPool{ch: make(chan beURLInterface, uploadURLPoolSize)}
}
func (p *urlPool) get() beURLInterface {
select {
case ue := <-p.ch:
// if the channel has an upload URL available, use that
return ue
default:
// otherwise return nil; a new upload URL needs to be generated
return nil
}
}
func (p *urlPool) put(u beURLInterface) {
select {
case p.ch <- u:
// put the URL back if possible
default:
// if the channel is full, throw it away
}
}
// Bucket returns a bucket if it exists.
func (c *Client) Bucket(ctx context.Context, name string) (*Bucket, error) {
buckets, err := c.backend.listBuckets(ctx)
if err != nil {
return nil, err
}
for _, bucket := range buckets {
if bucket.name() == name {
return &Bucket{
b: bucket,
r: c.backend,
c: c,
urlPool: newURLPool(),
}, nil
}
}
return nil, b2err{
err: fmt.Errorf("%s: bucket not found", name),
notFoundErr: true,
}
}
// NewBucket returns a bucket. The bucket is created with the given attributes
// if it does not already exist. If attrs is nil, it is created as a private
// bucket with no info metadata and no lifecycle rules.
func (c *Client) NewBucket(ctx context.Context, name string, attrs *BucketAttrs) (*Bucket, error) {
buckets, err := c.backend.listBuckets(ctx)
if err != nil {
return nil, err
}
for _, bucket := range buckets {
if bucket.name() == name {
return &Bucket{
b: bucket,
r: c.backend,
c: c,
urlPool: newURLPool(),
}, nil
}
}
if attrs == nil {
attrs = &BucketAttrs{Type: Private}
}
b, err := c.backend.createBucket(ctx, name, string(attrs.Type), attrs.Info, attrs.LifecycleRules)
if err != nil {
return nil, err
}
return &Bucket{
b: b,
r: c.backend,
c: c,
urlPool: newURLPool(),
}, err
}
// ListBuckets returns all the available buckets.
func (c *Client) ListBuckets(ctx context.Context) ([]*Bucket, error) {
bs, err := c.backend.listBuckets(ctx)
if err != nil {
return nil, err
}
var buckets []*Bucket
for _, b := range bs {
buckets = append(buckets, &Bucket{
b: b,
r: c.backend,
c: c,
urlPool: newURLPool(),
})
}
return buckets, nil
}
// IsUpdateConflict reports whether a given error is the result of a bucket
// update conflict.
func IsUpdateConflict(err error) bool {
e, ok := err.(b2err)
if !ok {
return false
}
return e.isUpdateConflict
}
// Update modifies the given bucket with new attributes. It is possible that
// this method could fail with an update conflict, in which case you should
// retrieve the latest bucket attributes with Attrs and try again.
func (b *Bucket) Update(ctx context.Context, attrs *BucketAttrs) error {
return b.b.updateBucket(ctx, attrs)
}
// Attrs retrieves and returns the current bucket's attributes.
func (b *Bucket) Attrs(ctx context.Context) (*BucketAttrs, error) {
bucket, err := b.c.Bucket(ctx, b.Name())
if err != nil {
return nil, err
}
b.b = bucket.b
return b.b.attrs(), nil
}
var bNotExist = regexp.MustCompile("Bucket.*does not exist")
// Delete removes a bucket. The bucket must be empty.
func (b *Bucket) Delete(ctx context.Context) error {
err := b.b.deleteBucket(ctx)
if err == nil {
return err
}
// So, the B2 documentation disagrees with the implementation here, and the
// error code is not really helpful. If the bucket doesn't exist, the error is
// 400, not 404, and the string is "Bucket <name> does not exist". However, the
// documentation says it will be "Bucket id <name> does not exist". In case
// they update the implementation to match the documentation, we're just going
// to regexp over the error message and hope it's okay.
if bNotExist.MatchString(err.Error()) {
return b2err{
err: err,
notFoundErr: true,
}
}
return err
}
// BaseURL returns the base URL to use for all files uploaded to this bucket.
func (b *Bucket) BaseURL() string {
return b.b.baseURL()
}
// Name returns the bucket's name.
func (b *Bucket) Name() string {
return b.b.name()
}
// Object represents a B2 object.
type Object struct {
attrs *Attrs
name string
f beFileInterface
b *Bucket
}
// Attrs holds an object's metadata.
type Attrs struct {
Name string // Not used on upload.
Size int64 // Not used on upload.
ContentType string // Used on upload, default is "application/octet-stream".
Status ObjectState // Not used on upload.
UploadTimestamp time.Time // Not used on upload.
SHA1 string // Can be "none" for large files. If set on upload, will be used for large files.
LastModified time.Time // If present, and there are fewer than 10 keys in the Info field, this is saved on upload.
Info map[string]string // Save arbitrary metadata on upload, but limited to 10 keys.
}
// Name returns an object's name.
func (o *Object) Name() string {
return o.name
}
// Attrs returns an object's attributes.
func (o *Object) Attrs(ctx context.Context) (*Attrs, error) {
if err := o.ensure(ctx); err != nil {
return nil, err
}
fi, err := o.f.getFileInfo(ctx)
if err != nil {
return nil, err
}
name, sha, size, ct, info, st, stamp := fi.stats()
var state ObjectState
switch st {
case "upload":
state = Uploaded
case "start":
state = Started
case "hide":
state = Hider
case "folder":
state = Folder
}
var mtime time.Time
if v, ok := info["src_last_modified_millis"]; ok {
ms, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
mtime = time.Unix(ms/1e3, (ms%1e3)*1e6)
delete(info, "src_last_modified_millis")
}
if v, ok := info["large_file_sha1"]; ok {
sha = v
}
return &Attrs{
Name: name,
Size: size,
ContentType: ct,
UploadTimestamp: stamp,
SHA1: sha,
Info: info,
Status: state,
LastModified: mtime,
}, nil
}
// ObjectState represents the various states an object can be in.
type ObjectState int
const (
Unknown ObjectState = iota
// Started represents a large upload that has been started but not finished
// or canceled.
Started
// Uploaded represents an object that has finished uploading and is complete.
Uploaded
// Hider represents an object that exists only to hide another object. It
// cannot in itself be downloaded and, in particular, is not a hidden object.
Hider
// Folder is a special state given to non-objects that are returned during a
// List call with a ListDelimiter option.
Folder
)
// Object returns a reference to the named object in the bucket. Hidden
// objects cannot be referenced in this manner; they can only be found by
// finding the appropriate reference in ListObjects.
func (b *Bucket) Object(name string) *Object {
return &Object{
name: name,
b: b,
}
}
// URL returns the full URL to the given object.
func (o *Object) URL() string {
return fmt.Sprintf("%s/file/%s/%s", o.b.BaseURL(), o.b.Name(), o.name)
}
// NewWriter returns a new writer for the given object. Objects that are
// overwritten are not deleted, but are "hidden".
//
// Callers must close the writer when finished and check the error status.
func (o *Object) NewWriter(ctx context.Context, opts ...WriterOption) *Writer {
ctx, cancel := context.WithCancel(ctx)
w := &Writer{
o: o,
name: o.name,
ctx: ctx,
cancel: cancel,
}
for _, f := range o.b.c.opts.writerOpts {
f(w)
}
for _, f := range opts {
f(w)
}
return w
}
// NewRangeReader returns a reader for the given object, reading up to length
// bytes. If length is negative, the rest of the object is read.
func (o *Object) NewRangeReader(ctx context.Context, offset, length int64) *Reader {
ctx, cancel := context.WithCancel(ctx)
return &Reader{
ctx: ctx,
cancel: cancel,
o: o,
name: o.name,
chunks: make(map[int]*rchunk),
length: length,
offset: offset,
}
}
// NewReader returns a reader for the given object.
func (o *Object) NewReader(ctx context.Context) *Reader {
return o.NewRangeReader(ctx, 0, -1)
}
func (o *Object) ensure(ctx context.Context) error {
if o.f == nil {
f, err := o.b.getObject(ctx, o.name)
if err != nil {
return err
}
o.f = f.f
}
return nil
}
// Delete removes the given object.
func (o *Object) Delete(ctx context.Context) error {
if err := o.ensure(ctx); err != nil {
return err
}
return o.f.deleteFileVersion(ctx)
}
// Hide hides the object from name-based listing.
func (o *Object) Hide(ctx context.Context) error {
if err := o.ensure(ctx); err != nil {
return err
}
_, err := o.b.b.hideFile(ctx, o.name)
return err
}
// Reveal unhides (if hidden) the named object. If there are multiple objects
// of a given name, it will reveal the most recent.
func (b *Bucket) Reveal(ctx context.Context, name string) error {
iter := b.List(ctx, ListPrefix(name), ListHidden())
for iter.Next() {
obj := iter.Object()
if obj.Name() == name {
if obj.f.status() == "hide" {
return obj.Delete(ctx)
}
return nil
}
if obj.Name() > name {
break
}
}
return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true}
}
// I don't want to import all of ioutil for this.
type discard struct{}
func (discard) Write(p []byte) (int, error) {
return len(p), nil
}
func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) {
fr, err := b.b.downloadFileByName(ctx, name, 0, 1)
if err != nil {
return nil, err
}
io.Copy(discard{}, fr)
fr.Close()
return &Object{
name: name,
f: b.b.file(fr.id(), name),
b: b,
}, nil
}
// AuthToken returns an authorization token that can be used to access objects
// in a private bucket. Only objects that begin with prefix can be accessed.
// The token expires after the given duration.
func (b *Bucket) AuthToken(ctx context.Context, prefix string, valid time.Duration) (string, error) {
return b.b.getDownloadAuthorization(ctx, prefix, valid, "")
}
// AuthURL returns a URL for the given object with embedded token and,
// possibly, b2ContentDisposition arguments. Leave b2cd blank for no content
// disposition.
func (o *Object) AuthURL(ctx context.Context, valid time.Duration, b2cd string) (*url.URL, error) {
token, err := o.b.b.getDownloadAuthorization(ctx, o.name, valid, b2cd)
if err != nil {
return nil, err
}
urlString := fmt.Sprintf("%s?Authorization=%s", o.URL(), url.QueryEscape(token))
if b2cd != "" {
urlString = fmt.Sprintf("%s&b2ContentDisposition=%s", urlString, url.QueryEscape(b2cd))
}
u, err := url.Parse(urlString)
if err != nil {
return nil, err
}
return u, nil
}

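Taken together, b2.go is meant to be driven through familiar io plumbing: a Writer for upload and a Reader for download. A minimal end-to-end sketch, assuming valid credentials in the B2_ACCOUNT_ID and B2_APP_KEY environment variables and an existing bucket named "my-bucket" (both assumptions):

package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/kurin/blazer/b2"
)

func main() {
	ctx := context.Background()
	client, err := b2.NewClient(ctx, os.Getenv("B2_ACCOUNT_ID"), os.Getenv("B2_APP_KEY"))
	if err != nil {
		log.Fatal(err)
	}
	bucket, err := client.Bucket(ctx, "my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	// Upload: an object exposes an io.WriteCloser...
	w := bucket.Object("hello.txt").NewWriter(ctx)
	if _, err := io.Copy(w, os.Stdin); err != nil {
		w.Close()
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	// ...and an io.ReadCloser for download.
	r := bucket.Object("hello.txt").NewReader(ctx)
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
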
vendor/github.com/kurin/blazer/b2/backend.go (generated, vendored, new file, 774 lines)

@@ -0,0 +1,774 @@
// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"context"
"io"
"math/rand"
"time"
)
// This file wraps the baseline interfaces with backoff and retry semantics.
type beRootInterface interface {
backoff(error) time.Duration
reauth(error) bool
transient(error) bool
reupload(error) bool
authorizeAccount(context.Context, string, string, clientOptions) error
reauthorizeAccount(context.Context) error
createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error)
listBuckets(context.Context) ([]beBucketInterface, error)
createKey(context.Context, string, []string, time.Duration, string, string) (beKeyInterface, error)
listKeys(context.Context, int, string) ([]beKeyInterface, string, error)
}
type beRoot struct {
account, key string
b2i b2RootInterface
options clientOptions
}
type beBucketInterface interface {
name() string
btype() BucketType
attrs() *BucketAttrs
id() string
updateBucket(context.Context, *BucketAttrs) error
deleteBucket(context.Context) error
getUploadURL(context.Context) (beURLInterface, error)
startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (beLargeFileInterface, error)
listFileNames(context.Context, int, string, string, string) ([]beFileInterface, string, error)
listFileVersions(context.Context, int, string, string, string, string) ([]beFileInterface, string, string, error)
listUnfinishedLargeFiles(context.Context, int, string) ([]beFileInterface, string, error)
downloadFileByName(context.Context, string, int64, int64) (beFileReaderInterface, error)
hideFile(context.Context, string) (beFileInterface, error)
getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error)
baseURL() string
file(string, string) beFileInterface
}
type beBucket struct {
b2bucket b2BucketInterface
ri beRootInterface
}
type beURLInterface interface {
uploadFile(context.Context, readResetter, int, string, string, string, map[string]string) (beFileInterface, error)
}
type beURL struct {
b2url b2URLInterface
ri beRootInterface
}
type beFileInterface interface {
name() string
size() int64
timestamp() time.Time
status() string
deleteFileVersion(context.Context) error
getFileInfo(context.Context) (beFileInfoInterface, error)
listParts(context.Context, int, int) ([]beFilePartInterface, int, error)
compileParts(int64, map[int]string) beLargeFileInterface
}
type beFile struct {
b2file b2FileInterface
url beURLInterface
ri beRootInterface
}
type beLargeFileInterface interface {
finishLargeFile(context.Context) (beFileInterface, error)
getUploadPartURL(context.Context) (beFileChunkInterface, error)
cancel(context.Context) error
}
type beLargeFile struct {
b2largeFile b2LargeFileInterface
ri beRootInterface
}
type beFileChunkInterface interface {
reload(context.Context) error
uploadPart(context.Context, readResetter, string, int, int) (int, error)
}
type beFileChunk struct {
b2fileChunk b2FileChunkInterface
ri beRootInterface
}
type beFileReaderInterface interface {
io.ReadCloser
stats() (int, string, string, map[string]string)
id() string
}
type beFileReader struct {
b2fileReader b2FileReaderInterface
ri beRootInterface
}
type beFileInfoInterface interface {
stats() (string, string, int64, string, map[string]string, string, time.Time)
}
type beFilePartInterface interface {
number() int
sha1() string
size() int64
}
type beFilePart struct {
b2filePart b2FilePartInterface
ri beRootInterface
}
type beFileInfo struct {
name string
sha string
size int64
ct string
info map[string]string
status string
stamp time.Time
}
type beKeyInterface interface {
del(context.Context) error
caps() []string
name() string
expires() time.Time
secret() string
id() string
}
type beKey struct {
b2i beRootInterface
k b2KeyInterface
}
func (r *beRoot) backoff(err error) time.Duration { return r.b2i.backoff(err) }
func (r *beRoot) reauth(err error) bool { return r.b2i.reauth(err) }
func (r *beRoot) reupload(err error) bool { return r.b2i.reupload(err) }
func (r *beRoot) transient(err error) bool { return r.b2i.transient(err) }
func (r *beRoot) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error {
f := func() error {
if err := r.b2i.authorizeAccount(ctx, account, key, c); err != nil {
return err
}
r.account = account
r.key = key
r.options = c
return nil
}
return withBackoff(ctx, r, f)
}
func (r *beRoot) reauthorizeAccount(ctx context.Context) error {
return r.authorizeAccount(ctx, r.account, r.key, r.options)
}
func (r *beRoot) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) {
var bi beBucketInterface
f := func() error {
g := func() error {
bucket, err := r.b2i.createBucket(ctx, name, btype, info, rules)
if err != nil {
return err
}
bi = &beBucket{
b2bucket: bucket,
ri: r,
}
return nil
}
return withReauth(ctx, r, g)
}
if err := withBackoff(ctx, r, f); err != nil {
return nil, err
}
return bi, nil
}
func (r *beRoot) listBuckets(ctx context.Context) ([]beBucketInterface, error) {
var buckets []beBucketInterface
f := func() error {
g := func() error {
bs, err := r.b2i.listBuckets(ctx)
if err != nil {
return err
}
for _, b := range bs {
buckets = append(buckets, &beBucket{
b2bucket: b,
ri: r,
})
}
return nil
}
return withReauth(ctx, r, g)
}
if err := withBackoff(ctx, r, f); err != nil {
return nil, err
}
return buckets, nil
}
func (r *beRoot) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (beKeyInterface, error) {
var k *beKey
f := func() error {
g := func() error {
got, err := r.b2i.createKey(ctx, name, caps, valid, bucketID, prefix)
if err != nil {
return err
}
k = &beKey{
b2i: r,
k: got,
}
return nil
}
return withReauth(ctx, r, g)
}
if err := withBackoff(ctx, r, f); err != nil {
return nil, err
}
return k, nil
}
func (r *beRoot) listKeys(ctx context.Context, max int, next string) ([]beKeyInterface, string, error) {
var keys []beKeyInterface
var cur string
f := func() error {
g := func() error {
got, n, err := r.b2i.listKeys(ctx, max, next)
if err != nil {
return err
}
cur = n
for _, g := range got {
keys = append(keys, &beKey{
b2i: r,
k: g,
})
}
return nil
}
return withReauth(ctx, r, g)
}
if err := withBackoff(ctx, r, f); err != nil {
return nil, "", err
}
return keys, cur, nil
}
func (b *beBucket) name() string { return b.b2bucket.name() }
func (b *beBucket) btype() BucketType { return BucketType(b.b2bucket.btype()) }
func (b *beBucket) attrs() *BucketAttrs { return b.b2bucket.attrs() }
func (b *beBucket) id() string { return b.b2bucket.id() }
func (b *beBucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error {
f := func() error {
g := func() error {
return b.b2bucket.updateBucket(ctx, attrs)
}
return withReauth(ctx, b.ri, g)
}
return withBackoff(ctx, b.ri, f)
}
func (b *beBucket) deleteBucket(ctx context.Context) error {
f := func() error {
g := func() error {
return b.b2bucket.deleteBucket(ctx)
}
return withReauth(ctx, b.ri, g)
}
return withBackoff(ctx, b.ri, f)
}
func (b *beBucket) getUploadURL(ctx context.Context) (beURLInterface, error) {
var url beURLInterface
f := func() error {
g := func() error {
u, err := b.b2bucket.getUploadURL(ctx)
if err != nil {
return err
}
url = &beURL{
b2url: u,
ri: b.ri,
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, err
}
return url, nil
}
func (b *beBucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (beLargeFileInterface, error) {
var file beLargeFileInterface
f := func() error {
g := func() error {
f, err := b.b2bucket.startLargeFile(ctx, name, ct, info)
if err != nil {
return err
}
file = &beLargeFile{
b2largeFile: f,
ri: b.ri,
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, err
}
return file, nil
}
func (b *beBucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]beFileInterface, string, error) {
var cont string
var files []beFileInterface
f := func() error {
g := func() error {
fs, c, err := b.b2bucket.listFileNames(ctx, count, continuation, prefix, delimiter)
if err != nil {
return err
}
cont = c
for _, f := range fs {
files = append(files, &beFile{
b2file: f,
ri: b.ri,
})
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, "", err
}
return files, cont, nil
}
func (b *beBucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]beFileInterface, string, string, error) {
var name, id string
var files []beFileInterface
f := func() error {
g := func() error {
fs, n, d, err := b.b2bucket.listFileVersions(ctx, count, nextName, nextID, prefix, delimiter)
if err != nil {
return err
}
name = n
id = d
for _, f := range fs {
files = append(files, &beFile{
b2file: f,
ri: b.ri,
})
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, "", "", err
}
return files, name, id, nil
}
func (b *beBucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]beFileInterface, string, error) {
var cont string
var files []beFileInterface
f := func() error {
g := func() error {
fs, c, err := b.b2bucket.listUnfinishedLargeFiles(ctx, count, continuation)
if err != nil {
return err
}
cont = c
for _, f := range fs {
files = append(files, &beFile{
b2file: f,
ri: b.ri,
})
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, "", err
}
return files, cont, nil
}
func (b *beBucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (beFileReaderInterface, error) {
var reader beFileReaderInterface
f := func() error {
g := func() error {
fr, err := b.b2bucket.downloadFileByName(ctx, name, offset, size)
if err != nil {
return err
}
reader = &beFileReader{
b2fileReader: fr,
ri: b.ri,
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, err
}
return reader, nil
}
func (b *beBucket) hideFile(ctx context.Context, name string) (beFileInterface, error) {
var file beFileInterface
f := func() error {
g := func() error {
f, err := b.b2bucket.hideFile(ctx, name)
if err != nil {
return err
}
file = &beFile{
b2file: f,
ri: b.ri,
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, err
}
return file, nil
}
func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) {
var tok string
f := func() error {
g := func() error {
t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v, s)
if err != nil {
return err
}
tok = t
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return "", err
}
return tok, nil
}
func (b *beBucket) baseURL() string {
return b.b2bucket.baseURL()
}
func (b *beBucket) file(id, name string) beFileInterface {
return &beFile{
b2file: b.b2bucket.file(id, name),
ri: b.ri,
}
}
func (b *beURL) uploadFile(ctx context.Context, r readResetter, size int, name, ct, sha1 string, info map[string]string) (beFileInterface, error) {
var file beFileInterface
f := func() error {
if err := r.Reset(); err != nil {
return err
}
f, err := b.b2url.uploadFile(ctx, r, size, name, ct, sha1, info)
if err != nil {
return err
}
file = &beFile{
b2file: f,
url: b,
ri: b.ri,
}
return nil
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, err
}
return file, nil
}
func (b *beFile) deleteFileVersion(ctx context.Context) error {
f := func() error {
g := func() error {
return b.b2file.deleteFileVersion(ctx)
}
return withReauth(ctx, b.ri, g)
}
return withBackoff(ctx, b.ri, f)
}
func (b *beFile) size() int64 {
return b.b2file.size()
}
func (b *beFile) name() string {
return b.b2file.name()
}
func (b *beFile) timestamp() time.Time {
return b.b2file.timestamp()
}
func (b *beFile) status() string {
return b.b2file.status()
}
func (b *beFile) getFileInfo(ctx context.Context) (beFileInfoInterface, error) {
var fileInfo beFileInfoInterface
f := func() error {
g := func() error {
fi, err := b.b2file.getFileInfo(ctx)
if err != nil {
return err
}
name, sha, size, ct, info, status, stamp := fi.stats()
fileInfo = &beFileInfo{
name: name,
sha: sha,
size: size,
ct: ct,
info: info,
status: status,
stamp: stamp,
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, err
}
return fileInfo, nil
}
func (b *beFile) listParts(ctx context.Context, next, count int) ([]beFilePartInterface, int, error) {
var fpi []beFilePartInterface
var rnxt int
f := func() error {
g := func() error {
ps, n, err := b.b2file.listParts(ctx, next, count)
if err != nil {
return err
}
rnxt = n
for _, p := range ps {
fpi = append(fpi, &beFilePart{
b2filePart: p,
ri: b.ri,
})
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, 0, err
}
return fpi, rnxt, nil
}
func (b *beFile) compileParts(size int64, seen map[int]string) beLargeFileInterface {
return &beLargeFile{
b2largeFile: b.b2file.compileParts(size, seen),
ri: b.ri,
}
}
func (b *beLargeFile) getUploadPartURL(ctx context.Context) (beFileChunkInterface, error) {
var chunk beFileChunkInterface
f := func() error {
g := func() error {
fc, err := b.b2largeFile.getUploadPartURL(ctx)
if err != nil {
return err
}
chunk = &beFileChunk{
b2fileChunk: fc,
ri: b.ri,
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, err
}
return chunk, nil
}
func (b *beLargeFile) finishLargeFile(ctx context.Context) (beFileInterface, error) {
var file beFileInterface
f := func() error {
g := func() error {
f, err := b.b2largeFile.finishLargeFile(ctx)
if err != nil {
return err
}
file = &beFile{
b2file: f,
ri: b.ri,
}
return nil
}
return withReauth(ctx, b.ri, g)
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return nil, err
}
return file, nil
}
func (b *beLargeFile) cancel(ctx context.Context) error {
f := func() error {
g := func() error {
return b.b2largeFile.cancel(ctx)
}
return withReauth(ctx, b.ri, g)
}
return withBackoff(ctx, b.ri, f)
}
func (b *beFileChunk) reload(ctx context.Context) error {
f := func() error {
g := func() error {
return b.b2fileChunk.reload(ctx)
}
return withReauth(ctx, b.ri, g)
}
return withBackoff(ctx, b.ri, f)
}
func (b *beFileChunk) uploadPart(ctx context.Context, r readResetter, sha1 string, size, index int) (int, error) {
// no re-auth; pass it back up to the caller so they can get a new upload URL and token
// TODO: we should handle that here probably
var i int
f := func() error {
if err := r.Reset(); err != nil {
return err
}
j, err := b.b2fileChunk.uploadPart(ctx, r, sha1, size, index)
if err != nil {
return err
}
i = j
return nil
}
if err := withBackoff(ctx, b.ri, f); err != nil {
return 0, err
}
return i, nil
}
func (b *beFileReader) Read(p []byte) (int, error) {
return b.b2fileReader.Read(p)
}
func (b *beFileReader) Close() error {
return b.b2fileReader.Close()
}
func (b *beFileReader) stats() (int, string, string, map[string]string) {
return b.b2fileReader.stats()
}
func (b *beFileReader) id() string { return b.b2fileReader.id() }
func (b *beFileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) {
return b.name, b.sha, b.size, b.ct, b.info, b.status, b.stamp
}
func (b *beFilePart) number() int { return b.b2filePart.number() }
func (b *beFilePart) sha1() string { return b.b2filePart.sha1() }
func (b *beFilePart) size() int64 { return b.b2filePart.size() }
func (b *beKey) del(ctx context.Context) error { return b.k.del(ctx) }
func (b *beKey) caps() []string { return b.k.caps() }
func (b *beKey) name() string { return b.k.name() }
func (b *beKey) expires() time.Time { return b.k.expires() }
func (b *beKey) secret() string { return b.k.secret() }
func (b *beKey) id() string { return b.k.id() }
func jitter(d time.Duration) time.Duration {
f := float64(d)
f /= 50
f += f * (rand.Float64() - 0.5)
return time.Duration(f)
}
func getBackoff(d time.Duration) time.Duration {
if d > 30*time.Second {
return 30*time.Second + jitter(d)
}
return d*2 + jitter(d*2)
}
var after = time.After
func withBackoff(ctx context.Context, ri beRootInterface, f func() error) error {
backoff := 500 * time.Millisecond
for {
err := f()
if !ri.transient(err) {
return err
}
bo := ri.backoff(err)
if bo > 0 {
backoff = bo
} else {
backoff = getBackoff(backoff)
}
select {
case <-ctx.Done():
return ctx.Err()
case <-after(backoff):
}
}
}
func withReauth(ctx context.Context, ri beRootInterface, f func() error) error {
err := f()
if ri.reauth(err) {
if err := ri.reauthorizeAccount(ctx); err != nil {
return err
}
err = f()
}
return err
}

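backend.go composes two retry layers: withReauth retries a call once after re-authenticating, and withBackoff wraps that in capped exponential backoff with jitter. A self-contained sketch of the same shape (function names here are illustrative, not the package's API):

package main

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"time"
)

var errTransient = errors.New("transient")

// retry mirrors the nested withReauth/withBackoff pattern: re-authenticate
// at most once per attempt, and keep retrying transient failures with
// capped exponential backoff plus jitter.
func retry(ctx context.Context, f func() error, transient, needsReauth func(error) bool, reauth func(context.Context) error) error {
	backoff := 500 * time.Millisecond
	for {
		err := f()
		if err != nil && needsReauth(err) {
			if rerr := reauth(ctx); rerr != nil {
				return rerr
			}
			err = f()
		}
		if err == nil || !transient(err) {
			return err
		}
		if backoff < 30*time.Second {
			backoff *= 2
		}
		wait := backoff + time.Duration(rand.Int63n(int64(backoff)/10))
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(wait):
		}
	}
}

func main() {
	calls := 0
	err := retry(context.Background(),
		func() error {
			calls++
			if calls < 3 {
				return errTransient
			}
			return nil
		},
		func(err error) bool { return errors.Is(err, errTransient) }, // transient?
		func(err error) bool { return false },                        // needs re-auth?
		func(context.Context) error { return nil },                   // re-authenticate
	)
	fmt.Println(calls, err) // 3 <nil>
}
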
vendor/github.com/kurin/blazer/b2/baseline.go (generated, vendored, new file, 517 lines)

@@ -0,0 +1,517 @@
// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"context"
"io"
"net/http"
"time"
"github.com/kurin/blazer/base"
)
// This file wraps the base package in a thin layer, for testing. It should be
// the only file in b2 that imports base.
type b2RootInterface interface {
authorizeAccount(context.Context, string, string, clientOptions) error
transient(error) bool
backoff(error) time.Duration
reauth(error) bool
reupload(error) bool
createBucket(context.Context, string, string, map[string]string, []LifecycleRule) (b2BucketInterface, error)
listBuckets(context.Context) ([]b2BucketInterface, error)
createKey(context.Context, string, []string, time.Duration, string, string) (b2KeyInterface, error)
listKeys(context.Context, int, string) ([]b2KeyInterface, string, error)
}
type b2BucketInterface interface {
name() string
btype() string
attrs() *BucketAttrs
id() string
updateBucket(context.Context, *BucketAttrs) error
deleteBucket(context.Context) error
getUploadURL(context.Context) (b2URLInterface, error)
startLargeFile(ctx context.Context, name, contentType string, info map[string]string) (b2LargeFileInterface, error)
listFileNames(context.Context, int, string, string, string) ([]b2FileInterface, string, error)
listFileVersions(context.Context, int, string, string, string, string) ([]b2FileInterface, string, string, error)
listUnfinishedLargeFiles(context.Context, int, string) ([]b2FileInterface, string, error)
downloadFileByName(context.Context, string, int64, int64) (b2FileReaderInterface, error)
hideFile(context.Context, string) (b2FileInterface, error)
getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error)
baseURL() string
file(string, string) b2FileInterface
}
type b2URLInterface interface {
reload(context.Context) error
uploadFile(context.Context, io.Reader, int, string, string, string, map[string]string) (b2FileInterface, error)
}
type b2FileInterface interface {
name() string
size() int64
timestamp() time.Time
status() string
deleteFileVersion(context.Context) error
getFileInfo(context.Context) (b2FileInfoInterface, error)
listParts(context.Context, int, int) ([]b2FilePartInterface, int, error)
compileParts(int64, map[int]string) b2LargeFileInterface
}
type b2LargeFileInterface interface {
finishLargeFile(context.Context) (b2FileInterface, error)
getUploadPartURL(context.Context) (b2FileChunkInterface, error)
cancel(context.Context) error
}
type b2FileChunkInterface interface {
reload(context.Context) error
uploadPart(context.Context, io.Reader, string, int, int) (int, error)
}
type b2FileReaderInterface interface {
io.ReadCloser
stats() (int, string, string, map[string]string)
id() string
}
type b2FileInfoInterface interface {
stats() (string, string, int64, string, map[string]string, string, time.Time) // bleck
}
type b2FilePartInterface interface {
number() int
sha1() string
size() int64
}
type b2KeyInterface interface {
del(context.Context) error
caps() []string
name() string
expires() time.Time
secret() string
id() string
}
type b2Root struct {
b *base.B2
}
type b2Bucket struct {
b *base.Bucket
}
type b2URL struct {
b *base.URL
}
type b2File struct {
b *base.File
}
type b2LargeFile struct {
b *base.LargeFile
}
type b2FileChunk struct {
b *base.FileChunk
}
type b2FileReader struct {
b *base.FileReader
}
type b2FileInfo struct {
b *base.FileInfo
}
type b2FilePart struct {
b *base.FilePart
}
type b2Key struct {
b *base.Key
}
func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error {
var aopts []base.AuthOption
ct := &clientTransport{client: c.client}
if c.transport != nil {
ct.rt = c.transport
}
aopts = append(aopts, base.Transport(ct))
if c.failSomeUploads {
aopts = append(aopts, base.FailSomeUploads())
}
if c.expireTokens {
aopts = append(aopts, base.ExpireSomeAuthTokens())
}
if c.capExceeded {
aopts = append(aopts, base.ForceCapExceeded())
}
if c.apiBase != "" {
aopts = append(aopts, base.SetAPIBase(c.apiBase))
}
for _, agent := range c.userAgents {
aopts = append(aopts, base.UserAgent(agent))
}
nb, err := base.AuthorizeAccount(ctx, account, key, aopts...)
if err != nil {
return err
}
if b.b == nil {
b.b = nb
return nil
}
b.b.Update(nb)
return nil
}
func (*b2Root) backoff(err error) time.Duration {
if base.Action(err) != base.Retry {
return 0
}
return base.Backoff(err)
}
func (*b2Root) reauth(err error) bool {
return base.Action(err) == base.ReAuthenticate
}
func (*b2Root) reupload(err error) bool {
return base.Action(err) == base.AttemptNewUpload
}
func (*b2Root) transient(err error) bool {
return base.Action(err) == base.Retry
}
func (b *b2Root) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (b2BucketInterface, error) {
var baseRules []base.LifecycleRule
for _, rule := range rules {
baseRules = append(baseRules, base.LifecycleRule{
DaysNewUntilHidden: rule.DaysNewUntilHidden,
DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
Prefix: rule.Prefix,
})
}
bucket, err := b.b.CreateBucket(ctx, name, btype, info, baseRules)
if err != nil {
return nil, err
}
return &b2Bucket{bucket}, nil
}
func (b *b2Root) listBuckets(ctx context.Context) ([]b2BucketInterface, error) {
buckets, err := b.b.ListBuckets(ctx)
if err != nil {
return nil, err
}
var rtn []b2BucketInterface
for _, bucket := range buckets {
rtn = append(rtn, &b2Bucket{bucket})
}
return rtn, err
}
func (b *b2Bucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error {
if attrs == nil {
return nil
}
if attrs.Type != UnknownType {
b.b.Type = string(attrs.Type)
}
if attrs.Info != nil {
b.b.Info = attrs.Info
}
if attrs.LifecycleRules != nil {
rules := []base.LifecycleRule{}
for _, rule := range attrs.LifecycleRules {
rules = append(rules, base.LifecycleRule{
DaysNewUntilHidden: rule.DaysNewUntilHidden,
DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
Prefix: rule.Prefix,
})
}
b.b.LifecycleRules = rules
}
newBucket, err := b.b.Update(ctx)
if err == nil {
b.b = newBucket
}
code, _ := base.Code(err)
if code == 409 {
return b2err{
err: err,
isUpdateConflict: true,
}
}
return err
}
func (b *b2Root) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (b2KeyInterface, error) {
k, err := b.b.CreateKey(ctx, name, caps, valid, bucketID, prefix)
if err != nil {
return nil, err
}
return &b2Key{k}, nil
}
func (b *b2Root) listKeys(ctx context.Context, max int, next string) ([]b2KeyInterface, string, error) {
keys, next, err := b.b.ListKeys(ctx, max, next)
if err != nil {
return nil, "", err
}
var k []b2KeyInterface
for _, key := range keys {
k = append(k, &b2Key{key})
}
return k, next, nil
}
func (b *b2Bucket) deleteBucket(ctx context.Context) error {
return b.b.DeleteBucket(ctx)
}
func (b *b2Bucket) name() string {
return b.b.Name
}
func (b *b2Bucket) btype() string {
return b.b.Type
}
func (b *b2Bucket) attrs() *BucketAttrs {
var rules []LifecycleRule
for _, rule := range b.b.LifecycleRules {
rules = append(rules, LifecycleRule{
DaysNewUntilHidden: rule.DaysNewUntilHidden,
DaysHiddenUntilDeleted: rule.DaysHiddenUntilDeleted,
Prefix: rule.Prefix,
})
}
return &BucketAttrs{
LifecycleRules: rules,
Info: b.b.Info,
Type: BucketType(b.b.Type),
}
}
func (b *b2Bucket) id() string { return b.b.ID }
func (b *b2Bucket) getUploadURL(ctx context.Context) (b2URLInterface, error) {
url, err := b.b.GetUploadURL(ctx)
if err != nil {
return nil, err
}
return &b2URL{url}, nil
}
func (b *b2Bucket) startLargeFile(ctx context.Context, name, ct string, info map[string]string) (b2LargeFileInterface, error) {
lf, err := b.b.StartLargeFile(ctx, name, ct, info)
if err != nil {
return nil, err
}
return &b2LargeFile{lf}, nil
}
func (b *b2Bucket) listFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]b2FileInterface, string, error) {
fs, c, err := b.b.ListFileNames(ctx, count, continuation, prefix, delimiter)
if err != nil {
return nil, "", err
}
var files []b2FileInterface
for _, f := range fs {
files = append(files, &b2File{f})
}
return files, c, nil
}
func (b *b2Bucket) listFileVersions(ctx context.Context, count int, nextName, nextID, prefix, delimiter string) ([]b2FileInterface, string, string, error) {
fs, name, id, err := b.b.ListFileVersions(ctx, count, nextName, nextID, prefix, delimiter)
if err != nil {
return nil, "", "", err
}
var files []b2FileInterface
for _, f := range fs {
files = append(files, &b2File{f})
}
return files, name, id, nil
}
func (b *b2Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]b2FileInterface, string, error) {
fs, cont, err := b.b.ListUnfinishedLargeFiles(ctx, count, continuation)
if err != nil {
return nil, "", err
}
var files []b2FileInterface
for _, f := range fs {
files = append(files, &b2File{f})
}
return files, cont, nil
}
func (b *b2Bucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (b2FileReaderInterface, error) {
fr, err := b.b.DownloadFileByName(ctx, name, offset, size)
if err != nil {
code, _ := base.Code(err)
switch code {
case http.StatusRequestedRangeNotSatisfiable:
return nil, errNoMoreContent
case http.StatusNotFound:
return nil, b2err{err: err, notFoundErr: true}
}
return nil, err
}
return &b2FileReader{fr}, nil
}
func (b *b2Bucket) hideFile(ctx context.Context, name string) (b2FileInterface, error) {
f, err := b.b.HideFile(ctx, name)
if err != nil {
return nil, err
}
return &b2File{f}, nil
}
func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) {
return b.b.GetDownloadAuthorization(ctx, p, v, s)
}
func (b *b2Bucket) baseURL() string {
return b.b.BaseURL()
}
func (b *b2Bucket) file(id, name string) b2FileInterface { return &b2File{b.b.File(id, name)} }
func (b *b2URL) uploadFile(ctx context.Context, r io.Reader, size int, name, contentType, sha1 string, info map[string]string) (b2FileInterface, error) {
file, err := b.b.UploadFile(ctx, r, size, name, contentType, sha1, info)
if err != nil {
return nil, err
}
return &b2File{file}, nil
}
func (b *b2URL) reload(ctx context.Context) error {
return b.b.Reload(ctx)
}
func (b *b2File) deleteFileVersion(ctx context.Context) error {
return b.b.DeleteFileVersion(ctx)
}
func (b *b2File) name() string {
return b.b.Name
}
func (b *b2File) size() int64 {
return b.b.Size
}
func (b *b2File) timestamp() time.Time {
return b.b.Timestamp
}
func (b *b2File) status() string {
return b.b.Status
}
func (b *b2File) getFileInfo(ctx context.Context) (b2FileInfoInterface, error) {
if b.b.Info != nil {
return &b2FileInfo{b.b.Info}, nil
}
fi, err := b.b.GetFileInfo(ctx)
if err != nil {
return nil, err
}
return &b2FileInfo{fi}, nil
}
func (b *b2File) listParts(ctx context.Context, next, count int) ([]b2FilePartInterface, int, error) {
parts, n, err := b.b.ListParts(ctx, next, count)
if err != nil {
return nil, 0, err
}
var rtn []b2FilePartInterface
for _, part := range parts {
rtn = append(rtn, &b2FilePart{part})
}
return rtn, n, nil
}
func (b *b2File) compileParts(size int64, seen map[int]string) b2LargeFileInterface {
return &b2LargeFile{b.b.CompileParts(size, seen)}
}
func (b *b2LargeFile) finishLargeFile(ctx context.Context) (b2FileInterface, error) {
f, err := b.b.FinishLargeFile(ctx)
if err != nil {
return nil, err
}
return &b2File{f}, nil
}
func (b *b2LargeFile) getUploadPartURL(ctx context.Context) (b2FileChunkInterface, error) {
c, err := b.b.GetUploadPartURL(ctx)
if err != nil {
return nil, err
}
return &b2FileChunk{c}, nil
}
func (b *b2LargeFile) cancel(ctx context.Context) error {
return b.b.CancelLargeFile(ctx)
}
func (b *b2FileChunk) reload(ctx context.Context) error {
return b.b.Reload(ctx)
}
func (b *b2FileChunk) uploadPart(ctx context.Context, r io.Reader, sha1 string, size, index int) (int, error) {
return b.b.UploadPart(ctx, r, sha1, size, index)
}
func (b *b2FileReader) Read(p []byte) (int, error) {
return b.b.Read(p)
}
func (b *b2FileReader) Close() error {
return b.b.Close()
}
func (b *b2FileReader) stats() (int, string, string, map[string]string) {
return b.b.ContentLength, b.b.ContentType, b.b.SHA1, b.b.Info
}
func (b *b2FileReader) id() string { return b.b.ID }
func (b *b2FileInfo) stats() (string, string, int64, string, map[string]string, string, time.Time) {
return b.b.Name, b.b.SHA1, b.b.Size, b.b.ContentType, b.b.Info, b.b.Status, b.b.Timestamp
}
func (b *b2FilePart) number() int { return b.b.Number }
func (b *b2FilePart) sha1() string { return b.b.SHA1 }
func (b *b2FilePart) size() int64 { return b.b.Size }
func (b *b2Key) del(ctx context.Context) error { return b.b.Delete(ctx) }
func (b *b2Key) caps() []string { return b.b.Capabilities }
func (b *b2Key) name() string { return b.b.Name }
func (b *b2Key) expires() time.Time { return b.b.Expires }
func (b *b2Key) secret() string { return b.b.Secret }
func (b *b2Key) id() string { return b.b.ID }

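baseline.go is deliberately mechanical: it adapts the concrete base package to the b2*Interface types, so the retry logic in backend.go, and the tests, never touch base directly. In miniature, the pattern looks like this (illustrative names, not the package's API):

package main

import "fmt"

// downloader is the seam that the retry layer and tests program against.
type downloader interface {
	download(name string) (string, error)
}

// sdkDownloader would adapt the real SDK; fakeDownloader stands in for tests.
type sdkDownloader struct{}

func (sdkDownloader) download(name string) (string, error) { return "live payload for " + name, nil }

type fakeDownloader struct{ objects map[string]string }

func (f fakeDownloader) download(name string) (string, error) { return f.objects[name], nil }

// fetch only sees the interface, so it works identically against both.
func fetch(d downloader, name string) string {
	body, _ := d.download(name)
	return body
}

func main() {
	fmt.Println(fetch(sdkDownloader{}, "a.txt"))
	fmt.Println(fetch(fakeDownloader{objects: map[string]string{"a.txt": "stub"}}, "a.txt"))
}
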
vendor/github.com/kurin/blazer/b2/buffer.go (generated, vendored, new file, 185 lines)

@@ -0,0 +1,185 @@
// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"bytes"
"crypto/sha1"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"strings"
"sync"
)
type readResetter interface {
Read([]byte) (int, error)
Reset() error
}
type resetter struct {
rs io.ReadSeeker
}
func (r resetter) Read(p []byte) (int, error) { return r.rs.Read(p) }
func (r resetter) Reset() error { _, err := r.rs.Seek(0, 0); return err }
func newResetter(p []byte) readResetter { return resetter{rs: bytes.NewReader(p)} }
type writeBuffer interface {
io.Writer
Len() int
Reader() (readResetter, error)
Hash() string // sha1 or whatever it is
Close() error
}
// nonBuffer doesn't buffer anything, but passes values directly from the
// source readseeker. Many nonBuffers can point at different parts of the same
// underlying source, and be accessed by multiple goroutines simultaneously.
func newNonBuffer(rs io.ReaderAt, offset, size int64) writeBuffer {
return &nonBuffer{
r: io.NewSectionReader(rs, offset, size),
size: int(size),
hsh: sha1.New(),
}
}
type nonBuffer struct {
r *io.SectionReader
size int
hsh hash.Hash
isEOF bool
buf *strings.Reader
}
func (nb *nonBuffer) Len() int { return nb.size + 40 }
func (nb *nonBuffer) Hash() string { return "hex_digits_at_end" }
func (nb *nonBuffer) Close() error { return nil }
func (nb *nonBuffer) Reader() (readResetter, error) { return nb, nil }
func (nb *nonBuffer) Write([]byte) (int, error) { return 0, errors.New("writes not supported") }
func (nb *nonBuffer) Read(p []byte) (int, error) {
if nb.isEOF {
return nb.buf.Read(p)
}
n, err := io.TeeReader(nb.r, nb.hsh).Read(p)
if err == io.EOF {
err = nil
nb.isEOF = true
nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.hsh.Sum(nil)))
}
return n, err
}
func (nb *nonBuffer) Reset() error {
nb.hsh.Reset()
nb.isEOF = false
_, err := nb.r.Seek(0, 0)
return err
}
type memoryBuffer struct {
buf *bytes.Buffer
hsh hash.Hash
w io.Writer
mux sync.Mutex
}
var bufpool *sync.Pool
func init() {
bufpool = &sync.Pool{}
bufpool.New = func() interface{} { return &bytes.Buffer{} }
}
func newMemoryBuffer() *memoryBuffer {
mb := &memoryBuffer{
hsh: sha1.New(),
}
mb.buf = bufpool.Get().(*bytes.Buffer)
mb.w = io.MultiWriter(mb.hsh, mb.buf)
return mb
}
func (mb *memoryBuffer) Write(p []byte) (int, error) { return mb.w.Write(p) }
func (mb *memoryBuffer) Len() int { return mb.buf.Len() }
func (mb *memoryBuffer) Reader() (readResetter, error) { return newResetter(mb.buf.Bytes()), nil }
func (mb *memoryBuffer) Hash() string { return fmt.Sprintf("%x", mb.hsh.Sum(nil)) }
func (mb *memoryBuffer) Close() error {
mb.mux.Lock()
defer mb.mux.Unlock()
if mb.buf == nil {
return nil
}
mb.buf.Truncate(0)
bufpool.Put(mb.buf)
mb.buf = nil
return nil
}
type fileBuffer struct {
f *os.File
hsh hash.Hash
w io.Writer
s int
}
func newFileBuffer(loc string) (*fileBuffer, error) {
f, err := ioutil.TempFile(loc, "blazer")
if err != nil {
return nil, err
}
fb := &fileBuffer{
f: f,
hsh: sha1.New(),
}
fb.w = io.MultiWriter(fb.f, fb.hsh)
return fb, nil
}
func (fb *fileBuffer) Write(p []byte) (int, error) {
n, err := fb.w.Write(p)
fb.s += n
return n, err
}
func (fb *fileBuffer) Len() int { return fb.s }
func (fb *fileBuffer) Hash() string { return fmt.Sprintf("%x", fb.hsh.Sum(nil)) }
func (fb *fileBuffer) Reader() (readResetter, error) {
if _, err := fb.f.Seek(0, 0); err != nil {
return nil, err
}
return &fr{f: fb.f}, nil
}
func (fb *fileBuffer) Close() error {
fb.f.Close()
return os.Remove(fb.f.Name())
}
// wraps *os.File so that the http package doesn't see it as an io.Closer
type fr struct {
f *os.File
}
func (r *fr) Read(p []byte) (int, error) { return r.f.Read(p) }
func (r *fr) Reset() error { _, err := r.f.Seek(0, 0); return err }

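nonBuffer leans on B2's "hex_digits_at_end" convention: when the SHA1 is not known up front, the upload body may end with the 40 hex digits of the hash, which is why Len adds 40. The core trick, a TeeReader that serves the digest once the source is drained, can be sketched on its own (illustrative names):

package main

import (
	"crypto/sha1"
	"fmt"
	"hash"
	"io"
	"os"
	"strings"
)

// trailerReader streams its source while hashing it, then serves the hex
// SHA1 once the source hits EOF, mirroring nonBuffer's behavior.
type trailerReader struct {
	r    io.Reader
	h    hash.Hash
	tail io.Reader
}

func newTrailerReader(r io.Reader) *trailerReader {
	h := sha1.New()
	return &trailerReader{r: io.TeeReader(r, h), h: h}
}

func (t *trailerReader) Read(p []byte) (int, error) {
	if t.tail != nil {
		return t.tail.Read(p)
	}
	n, err := t.r.Read(p)
	if err == io.EOF {
		// Source exhausted: switch to emitting the 40-character digest.
		t.tail = strings.NewReader(fmt.Sprintf("%x", t.h.Sum(nil)))
		err = nil
	}
	return n, err
}

func main() {
	body, _ := io.ReadAll(newTrailerReader(strings.NewReader("hello")))
	os.Stdout.Write(body) // "hello" followed by its hex SHA1
	fmt.Println()
}
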
vendor/github.com/kurin/blazer/b2/iterator.go (generated, vendored, new file, 331 lines)

@@ -0,0 +1,331 @@
// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"context"
"io"
"sync"
)
// List returns an iterator for selecting objects in a bucket. The default
// behavior, with no options, is to list all currently un-hidden objects.
func (b *Bucket) List(ctx context.Context, opts ...ListOption) *ObjectIterator {
o := &ObjectIterator{
bucket: b,
ctx: ctx,
}
for _, opt := range opts {
opt(&o.opts)
}
return o
}
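// A hedged usage sketch (editor's illustration; assumes a *Bucket named
// bucket and a context.Context named ctx):
//
//	iter := bucket.List(ctx, ListPrefix("logs/"), ListHidden(), ListPageSize(500))
//	for iter.Next() {
//		fmt.Println(iter.Object().Name())
//	}
//	if err := iter.Err(); err != nil {
//		// handle err
//	}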
// ObjectIterator abstracts away the tricky bits of iterating over a bucket's
// contents.
//
// It is intended to be called in a loop:
//
//	for iter.Next() {
//		obj := iter.Object()
//		// act on obj
//	}
//	if err := iter.Err(); err != nil {
//		// handle err
//	}
type ObjectIterator struct {
bucket *Bucket
ctx context.Context
final bool
err error
idx int
c *cursor
opts objectIteratorOptions
objs []*Object
init sync.Once
l lister
count int
}
type lister func(context.Context, int, *cursor) ([]*Object, *cursor, error)
func (o *ObjectIterator) page(ctx context.Context) error {
if o.opts.locker != nil {
o.opts.locker.Lock()
defer o.opts.locker.Unlock()
}
objs, c, err := o.l(ctx, o.count, o.c)
if err != nil && err != io.EOF {
if bNotExist.MatchString(err.Error()) {
return b2err{
err: err,
notFoundErr: true,
}
}
return err
}
o.c = c
o.objs = objs
o.idx = 0
if err == io.EOF {
o.final = true
}
return nil
}
// Next advances the iterator to the next object. It should be called before
// any calls to Object(). If Next returns true, then the next call to Object()
// will be valid. Once Next returns false, it is important to check the return
// value of Err().
func (o *ObjectIterator) Next() bool {
o.init.Do(func() {
o.count = o.opts.pageSize
if o.count < 0 || o.count > 1000 {
o.count = 1000
}
switch {
case o.opts.unfinished:
o.l = o.bucket.listUnfinishedLargeFiles
if o.count > 100 {
o.count = 100
}
case o.opts.hidden:
o.l = o.bucket.listObjects
default:
o.l = o.bucket.listCurrentObjects
}
o.c = &cursor{
prefix: o.opts.prefix,
delimiter: o.opts.delimiter,
}
})
if o.err != nil {
return false
}
if o.ctx.Err() != nil {
o.err = o.ctx.Err()
return false
}
if o.idx >= len(o.objs) {
if o.final {
o.err = io.EOF
return false
}
if err := o.page(o.ctx); err != nil {
o.err = err
return false
}
return o.Next()
}
o.idx++
return true
}
// Object returns the current object.
func (o *ObjectIterator) Object() *Object {
return o.objs[o.idx-1]
}
// Err returns the current error or nil. If Next() returns false and Err() is
// nil, then all objects have been seen.
func (o *ObjectIterator) Err() error {
if o.err == io.EOF {
return nil
}
return o.err
}
type objectIteratorOptions struct {
hidden bool
unfinished bool
prefix string
delimiter string
pageSize int
locker sync.Locker
}
// A ListOption alters the default behavior of List.
type ListOption func(*objectIteratorOptions)
// ListHidden will include hidden objects in the output.
func ListHidden() ListOption {
return func(o *objectIteratorOptions) {
o.hidden = true
}
}
// ListUnfinished will list unfinished large file operations instead of
// existing objects.
func ListUnfinished() ListOption {
return func(o *objectIteratorOptions) {
o.unfinished = true
}
}
// ListPrefix will restrict the output to objects whose names begin with
// prefix.
func ListPrefix(pfx string) ListOption {
return func(o *objectIteratorOptions) {
o.prefix = pfx
}
}
// ListDelimiter denotes the path separator. If set, object listings will be
// truncated at this character.
//
// For example, if the bucket contains objects foo/bar, foo/baz, and foo,
// then a delimiter of "/" will cause the listing to return "foo" and "foo/".
// Otherwise, the listing would have returned all object names.
//
// Note that objects returned that end in the delimiter may not be actual
// objects, e.g. you cannot read from (or write to, or delete) an object
// "foo/", both because no actual object exists and because B2 disallows object
// names that end with "/". If you want to ensure that all objects returned
// are actual objects, leave this unset.
func ListDelimiter(delimiter string) ListOption {
return func(o *objectIteratorOptions) {
o.delimiter = delimiter
}
}
// ListPageSize configures the iterator to request the given number of objects
// per network round-trip. The default (and maximum) is 1000 objects, except
// for unfinished large files, which is 100.
func ListPageSize(count int) ListOption {
return func(o *objectIteratorOptions) {
o.pageSize = count
}
}
// ListLocker passes the iterator a lock which will be held during network
// round-trips.
func ListLocker(l sync.Locker) ListOption {
return func(o *objectIteratorOptions) {
o.locker = l
}
}
type cursor struct {
// Prefix limits the listed objects to those that begin with this string.
prefix string
// Delimiter denotes the path separator. If set, object listings will be
// truncated at this character.
//
// For example, if the bucket contains objects foo/bar, foo/baz, and foo,
// then a delimiter of "/" will cause the listing to return "foo" and "foo/".
// Otherwise, the listing would have returned all object names.
//
// Note that objects returned that end in the delimiter may not be actual
// objects, e.g. you cannot read from (or write to, or delete) an object "foo/",
// both because no actual object exists and because B2 disallows object names
// that end with "/". If you want to ensure that all objects returned by
// ListObjects and ListCurrentObjects are actual objects, leave this unset.
delimiter string
name string
id string
}
func (b *Bucket) listObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) {
if c == nil {
c = &cursor{}
}
fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.prefix, c.delimiter)
if err != nil {
return nil, nil, err
}
var next *cursor
if name != "" && id != "" {
next = &cursor{
prefix: c.prefix,
delimiter: c.delimiter,
name: name,
id: id,
}
}
var objects []*Object
for _, f := range fs {
objects = append(objects, &Object{
name: f.name(),
f: f,
b: b,
})
}
var rtnErr error
if len(objects) == 0 || next == nil {
rtnErr = io.EOF
}
return objects, next, rtnErr
}
func (b *Bucket) listCurrentObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) {
if c == nil {
c = &cursor{}
}
fs, name, err := b.b.listFileNames(ctx, count, c.name, c.prefix, c.delimiter)
if err != nil {
return nil, nil, err
}
var next *cursor
if name != "" {
next = &cursor{
prefix: c.prefix,
delimiter: c.delimiter,
name: name,
}
}
var objects []*Object
for _, f := range fs {
objects = append(objects, &Object{
name: f.name(),
f: f,
b: b,
})
}
var rtnErr error
if len(objects) == 0 || next == nil {
rtnErr = io.EOF
}
return objects, next, rtnErr
}
func (b *Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) {
if c == nil {
c = &cursor{}
}
fs, name, err := b.b.listUnfinishedLargeFiles(ctx, count, c.name)
if err != nil {
return nil, nil, err
}
var next *cursor
if name != "" {
next = &cursor{
name: name,
}
}
var objects []*Object
for _, f := range fs {
objects = append(objects, &Object{
name: f.name(),
f: f,
b: b,
})
}
var rtnErr error
if len(objects) == 0 || next == nil {
rtnErr = io.EOF
}
return objects, next, rtnErr
}
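
A usage sketch (not part of the vendored file): the iterator composes with the ListOption values defined above; `bucket` is assumed to come from Client.Bucket elsewhere in this package.

// listBackups prints every current object under "backups/", fetching 500
// names per network round-trip.
func listBackups(ctx context.Context, bucket *b2.Bucket) error {
	iter := bucket.List(ctx, b2.ListPrefix("backups/"), b2.ListPageSize(500))
	for iter.Next() {
		fmt.Println(iter.Object().Name())
	}
	return iter.Err() // nil once every object has been seen
}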

vendor/github.com/kurin/blazer/b2/key.go generated vendored Normal file
@@ -0,0 +1,156 @@
// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"context"
"errors"
"io"
"time"
)
// Key is a B2 application key. A Key grants limited access on a global or
// per-bucket basis.
type Key struct {
c *Client
k beKeyInterface
}
// Capabilities returns the list of capabilities granted by this application
// key.
func (k *Key) Capabilities() []string { return k.k.caps() }
// Name returns the user-supplied name of this application key. Key names
// are informational only; a key cannot be looked up by its name.
func (k *Key) Name() string { return k.k.name() }
// Expires returns the expiration date of this application key.
func (k *Key) Expires() time.Time { return k.k.expires() }
// Delete removes the key from B2.
func (k *Key) Delete(ctx context.Context) error { return k.k.del(ctx) }
// Secret returns the value that should be passed into NewClient(). It is only
// available on newly created keys; it is not available from ListKeys
// operations.
func (k *Key) Secret() string { return k.k.secret() }
// ID returns the application key ID. This, plus the secret, is necessary to
// authenticate to B2.
func (k *Key) ID() string { return k.k.id() }
type keyOptions struct {
caps []string
prefix string
lifetime time.Duration
}
// KeyOption specifies desired properties for application keys.
type KeyOption func(*keyOptions)
// Lifetime requests a key with the given lifetime.
func Lifetime(d time.Duration) KeyOption {
return func(k *keyOptions) {
k.lifetime = d
}
}
// Deadline requests a key that expires after the given date.
func Deadline(t time.Time) KeyOption {
	return Lifetime(time.Until(t))
}
// Capabilities requests a key with the given capabilities.
func Capabilities(caps ...string) KeyOption {
return func(k *keyOptions) {
k.caps = append(k.caps, caps...)
}
}
// Prefix limits the requested application key to be valid only for objects
// that begin with prefix. This can only be used when requesting an
// application key within a specific bucket.
func Prefix(prefix string) KeyOption {
return func(k *keyOptions) {
k.prefix = prefix
}
}
// CreateKey creates a global application key that is valid for all buckets in
// this project. The key's secret will only be accessible on the object
// returned from this call.
func (c *Client) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) {
var ko keyOptions
for _, o := range opts {
o(&ko)
}
if ko.prefix != "" {
return nil, errors.New("Prefix is not a valid option for global application keys")
}
ki, err := c.backend.createKey(ctx, name, ko.caps, ko.lifetime, "", "")
if err != nil {
return nil, err
}
return &Key{
c: c,
k: ki,
}, nil
}
// ListKeys lists all the keys associated with this project. It takes the
// maximum number of keys it should return in a call, as well as a cursor
// (which should be empty for the initial call). It will return up to count
// keys, as well as the cursor for the next invocation.
//
// ListKeys returns io.EOF when there are no more keys, although it may do so
// together with the final set of keys.
func (c *Client) ListKeys(ctx context.Context, count int, cursor string) ([]*Key, string, error) {
ks, next, err := c.backend.listKeys(ctx, count, cursor)
if err != nil {
return nil, "", err
}
if len(ks) == 0 {
return nil, "", io.EOF
}
var keys []*Key
for _, k := range ks {
keys = append(keys, &Key{
c: c,
k: k,
})
}
var rerr error
if next == "" {
rerr = io.EOF
}
return keys, next, rerr
}
// CreateKey creates a scoped application key that is valid only for this bucket.
func (b *Bucket) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) {
var ko keyOptions
for _, o := range opts {
o(&ko)
}
ki, err := b.r.createKey(ctx, name, ko.caps, ko.lifetime, b.b.id(), ko.prefix)
if err != nil {
return nil, err
}
return &Key{
c: b.c,
k: ki,
}, nil
}
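
A sketch of the bucket-scoped variant (the capability name "readFiles" is B2-defined; the key name, prefix, and lifetime here are illustrative):

// makeLogReaderKey creates a key that can only read objects under "logs/"
// and expires after 24 hours. Capture Secret() now; it cannot be recovered
// from ListKeys later.
func makeLogReaderKey(ctx context.Context, bucket *b2.Bucket) (id, secret string, err error) {
	key, err := bucket.CreateKey(ctx, "log-reader",
		b2.Capabilities("readFiles"),
		b2.Prefix("logs/"),
		b2.Lifetime(24*time.Hour))
	if err != nil {
		return "", "", err
	}
	return key.ID(), key.Secret(), nil
}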

vendor/github.com/kurin/blazer/b2/monitor.go generated vendored Normal file
@@ -0,0 +1,251 @@
// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"fmt"
"html/template"
"math"
"net/http"
"sort"
"time"
"github.com/kurin/blazer/internal/b2assets"
"github.com/kurin/blazer/x/window"
)
// StatusInfo reports information about a client.
type StatusInfo struct {
// Writers contains the status of all current uploads with progress.
Writers map[string]*WriterStatus
// Readers contains the status of all current downloads with progress.
Readers map[string]*ReaderStatus
// RPCs contains information about recently made RPC calls over the last
// minute, five minutes, hour, and for all time.
RPCs map[time.Duration]MethodList
}
// MethodList is an accumulation of RPC calls that have been made over a given
// period of time.
type MethodList []method
// CountByMethod returns the total RPC calls made per method.
func (ml MethodList) CountByMethod() map[string]int {
r := make(map[string]int)
for i := range ml {
r[ml[i].name]++
}
return r
}
type method struct {
name string
duration time.Duration
status int
}
type methodCounter struct {
d time.Duration
w *window.Window
}
func (mc methodCounter) record(m method) {
mc.w.Insert([]method{m})
}
func (mc methodCounter) retrieve() MethodList {
ms := mc.w.Reduce()
return MethodList(ms.([]method))
}
func newMethodCounter(d, res time.Duration) methodCounter {
r := func(i, j interface{}) interface{} {
a, ok := i.([]method)
if !ok {
a = nil
}
b, ok := j.([]method)
if !ok {
b = nil
}
for _, m := range b {
a = append(a, m)
}
return a
}
return methodCounter{
d: d,
w: window.New(d, res, r),
}
}
// WriterStatus reports the status for each writer.
type WriterStatus struct {
// Progress is a slice of completion ratios. The index of a ratio is its
// chunk id less one.
Progress []float64
}
// ReaderStatus reports the status for each reader.
type ReaderStatus struct {
// Progress is a slice of completion ratios. The index of a ratio is its
// chunk id less one.
Progress []float64
}
// Status returns information about the current state of the client.
func (c *Client) Status() *StatusInfo {
c.slock.Lock()
defer c.slock.Unlock()
si := &StatusInfo{
Writers: make(map[string]*WriterStatus),
Readers: make(map[string]*ReaderStatus),
RPCs: make(map[time.Duration]MethodList),
}
for name, w := range c.sWriters {
si.Writers[name] = w.status()
}
for name, r := range c.sReaders {
si.Readers[name] = r.status()
}
for _, c := range c.sMethods {
si.RPCs[c.d] = c.retrieve()
}
return si
}
func (si *StatusInfo) table() map[string]map[string]int {
r := make(map[string]map[string]int)
for d, c := range si.RPCs {
for _, m := range c {
if _, ok := r[m.name]; !ok {
r[m.name] = make(map[string]int)
}
dur := "all time"
if d > 0 {
dur = d.String()
}
r[m.name][dur]++
}
}
return r
}
func (c *Client) addWriter(w *Writer) {
c.slock.Lock()
defer c.slock.Unlock()
if c.sWriters == nil {
c.sWriters = make(map[string]*Writer)
}
c.sWriters[fmt.Sprintf("%s/%s", w.o.b.Name(), w.name)] = w
}
func (c *Client) removeWriter(w *Writer) {
c.slock.Lock()
defer c.slock.Unlock()
if c.sWriters == nil {
return
}
delete(c.sWriters, fmt.Sprintf("%s/%s", w.o.b.Name(), w.name))
}
func (c *Client) addReader(r *Reader) {
c.slock.Lock()
defer c.slock.Unlock()
if c.sReaders == nil {
c.sReaders = make(map[string]*Reader)
}
c.sReaders[fmt.Sprintf("%s/%s", r.o.b.Name(), r.name)] = r
}
func (c *Client) removeReader(r *Reader) {
c.slock.Lock()
defer c.slock.Unlock()
if c.sReaders == nil {
return
}
delete(c.sReaders, fmt.Sprintf("%s/%s", r.o.b.Name(), r.name))
}
var (
funcMap = template.FuncMap{
"inc": func(i int) int { return i + 1 },
"lookUp": func(m map[string]int, s string) int { return m[s] },
"pRange": func(i int) string {
f := float64(i)
min := int(math.Pow(2, f)) - 1
max := min + int(math.Pow(2, f))
return fmt.Sprintf("%v - %v", time.Duration(min)*time.Millisecond, time.Duration(max)*time.Millisecond)
},
"methods": func(si *StatusInfo) []string {
methods := make(map[string]bool)
for _, ms := range si.RPCs {
for _, m := range ms {
methods[m.name] = true
}
}
var names []string
for name := range methods {
names = append(names, name)
}
sort.Strings(names)
return names
},
"durations": func(si *StatusInfo) []string {
var ds []time.Duration
for d := range si.RPCs {
ds = append(ds, d)
}
sort.Slice(ds, func(i, j int) bool { return ds[i] < ds[j] })
var r []string
for _, d := range ds {
dur := "all time"
if d > 0 {
dur = d.String()
}
r = append(r, dur)
}
return r
},
"table": func(si *StatusInfo) map[string]map[string]int { return si.table() },
}
statusTemplate = template.Must(template.New("status").Funcs(funcMap).Parse(string(b2assets.MustAsset("data/status.html"))))
)
// ServeHTTP serves diagnostic information about the current state of the
// client; essentially everything available from Client.Status().
//
// ServeHTTP satisfies the http.Handler interface. This means that a Client
// can be passed directly to a path via http.Handle (or on a custom ServeMux or
// a custom http.Server).
func (c *Client) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
info := c.Status()
statusTemplate.Execute(rw, info)
}
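
Because *Client satisfies http.Handler, exposing the page is a one-liner; a minimal wiring sketch (the path, address, and client variable are illustrative):

http.Handle("/debug/b2", client)
log.Fatal(http.ListenAndServe("localhost:8080", nil))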

vendor/github.com/kurin/blazer/b2/reader.go generated vendored Normal file
@@ -0,0 +1,348 @@
// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"bytes"
"context"
"crypto/sha1"
"errors"
"fmt"
"hash"
"io"
"sync"
"time"
"github.com/kurin/blazer/internal/blog"
)
var errNoMoreContent = errors.New("416: out of content")
// Reader reads files from B2.
type Reader struct {
// ConcurrentDownloads is the number of simultaneous downloads to pull from
// B2. Values greater than one will cause B2 to make multiple HTTP requests
// for a given file, increasing available bandwidth at the cost of buffering
// the downloads in memory.
ConcurrentDownloads int
// ChunkSize is the size to fetch per ConcurrentDownload. The default is
// 10MB.
ChunkSize int
ctx context.Context
cancel context.CancelFunc // cancels ctx
o *Object
name string
offset int64 // the start of the file
length int64 // the length to read, or -1
csize int // chunk size
read int // amount read
chwid int // chunks written
chrid int // chunks read
chbuf chan *rchunk
init sync.Once
chunks map[int]*rchunk
vrfy hash.Hash
readOffEnd bool
sha1 string
rmux sync.Mutex // guards rcond
rcond *sync.Cond
emux sync.RWMutex // guards err, believe it or not
err error
smux sync.Mutex
smap map[int]*meteredReader
}
type rchunk struct {
bytes.Buffer
final bool
}
// Close frees resources associated with the download.
func (r *Reader) Close() error {
r.cancel()
r.o.b.c.removeReader(r)
return nil
}
func (r *Reader) setErr(err error) {
r.emux.Lock()
defer r.emux.Unlock()
if r.err == nil {
r.err = err
r.cancel()
}
}
func (r *Reader) setErrNoCancel(err error) {
r.emux.Lock()
defer r.emux.Unlock()
if r.err == nil {
r.err = err
}
}
func (r *Reader) getErr() error {
r.emux.RLock()
defer r.emux.RUnlock()
return r.err
}
func (r *Reader) thread() {
go func() {
for {
var buf *rchunk
select {
case b, ok := <-r.chbuf:
if !ok {
return
}
buf = b
case <-r.ctx.Done():
return
}
r.rmux.Lock()
chunkID := r.chwid
r.chwid++
r.rmux.Unlock()
offset := int64(chunkID*r.csize) + r.offset
size := int64(r.csize)
if r.length > 0 {
if size > r.length {
buf.final = true
size = r.length
}
r.length -= size
}
var b backoff
redo:
fr, err := r.o.b.b.downloadFileByName(r.ctx, r.name, offset, size)
if err == errNoMoreContent {
// this read generated a 416 so we are entirely past the end of the object
r.readOffEnd = true
buf.final = true
r.rmux.Lock()
r.chunks[chunkID] = buf
r.rmux.Unlock()
r.rcond.Broadcast()
return
}
if err != nil {
r.setErr(err)
r.rcond.Broadcast()
return
}
rsize, _, sha1, _ := fr.stats()
if len(sha1) == 40 && r.sha1 != sha1 {
r.sha1 = sha1
}
mr := &meteredReader{r: noopResetter{fr}, size: int(rsize)}
r.smux.Lock()
r.smap[chunkID] = mr
r.smux.Unlock()
i, err := copyContext(r.ctx, buf, mr)
fr.Close()
r.smux.Lock()
r.smap[chunkID] = nil
r.smux.Unlock()
if i < int64(rsize) || err == io.ErrUnexpectedEOF {
// Probably the network connection was closed early. Retry.
blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying after %v", chunkID, i, rsize, b)
if err := b.wait(r.ctx); err != nil {
r.setErr(err)
r.rcond.Broadcast()
return
}
buf.Reset()
goto redo
}
if err != nil {
r.setErr(err)
r.rcond.Broadcast()
return
}
r.rmux.Lock()
r.chunks[chunkID] = buf
r.rmux.Unlock()
r.rcond.Broadcast()
}
}()
}
func (r *Reader) curChunk() (*rchunk, error) {
ch := make(chan *rchunk)
go func() {
r.rmux.Lock()
defer r.rmux.Unlock()
for r.chunks[r.chrid] == nil && r.getErr() == nil && r.ctx.Err() == nil {
r.rcond.Wait()
}
select {
case ch <- r.chunks[r.chrid]:
case <-r.ctx.Done():
return
}
}()
select {
case buf := <-ch:
return buf, r.getErr()
case <-r.ctx.Done():
if r.getErr() != nil {
return nil, r.getErr()
}
return nil, r.ctx.Err()
}
}
func (r *Reader) initFunc() {
r.smux.Lock()
r.smap = make(map[int]*meteredReader)
r.smux.Unlock()
r.o.b.c.addReader(r)
r.rcond = sync.NewCond(&r.rmux)
cr := r.ConcurrentDownloads
if cr < 1 {
cr = 1
}
if r.ChunkSize < 1 {
r.ChunkSize = 1e7
}
r.csize = r.ChunkSize
r.chbuf = make(chan *rchunk, cr)
for i := 0; i < cr; i++ {
r.thread()
r.chbuf <- &rchunk{}
}
r.vrfy = sha1.New()
}
func (r *Reader) Read(p []byte) (int, error) {
if err := r.getErr(); err != nil {
return 0, err
}
r.init.Do(r.initFunc)
chunk, err := r.curChunk()
if err != nil {
r.setErrNoCancel(err)
return 0, err
}
n, err := chunk.Read(p)
r.vrfy.Write(p[:n]) // Hash.Write never returns an error.
r.read += n
if err == io.EOF {
if chunk.final {
close(r.chbuf)
r.setErrNoCancel(err)
return n, err
}
r.chrid++
chunk.Reset()
r.chbuf <- chunk
err = nil
}
r.setErrNoCancel(err)
return n, err
}
func (r *Reader) status() *ReaderStatus {
r.smux.Lock()
defer r.smux.Unlock()
rs := &ReaderStatus{
Progress: make([]float64, len(r.smap)),
}
for i := 1; i <= len(r.smap); i++ {
rs.Progress[i-1] = r.smap[i].done()
}
return rs
}
// Verify checks the SHA1 hash on download and compares it to the SHA1 hash
// submitted on upload. If the two differ, this returns an error. If the
// correct hash could not be calculated (if, for example, the entire object was
// not read, or if the object was uploaded as a "large file" and thus the SHA1
// hash was not sent), this returns (nil, false).
func (r *Reader) Verify() (error, bool) {
got := fmt.Sprintf("%x", r.vrfy.Sum(nil))
if r.sha1 == got {
return nil, true
}
// TODO: if the exact length of the file is requested AND the checksum is
// bad, this will return (nil, false) instead of (an error, true). This is
// because there's no good way that I can tell to determine that we've hit
// the end of the file without reading off the end. Consider reading N+1
// bytes at the very end to close this hole.
if r.offset > 0 || !r.readOffEnd || len(r.sha1) != 40 {
return nil, false
}
return fmt.Errorf("bad hash: got %v, want %v", got, r.sha1), true
}
// strip a writer of any non-Write methods
type onlyWriter struct{ w io.Writer }
func (ow onlyWriter) Write(p []byte) (int, error) { return ow.w.Write(p) }
func copyContext(ctx context.Context, w io.Writer, r io.Reader) (int64, error) {
var n int64
var err error
done := make(chan struct{})
go func() {
if _, ok := w.(*Writer); ok {
w = onlyWriter{w}
}
n, err = io.Copy(w, r)
close(done)
}()
select {
case <-done:
return n, err
case <-ctx.Done():
return 0, ctx.Err()
}
}
type noopResetter struct {
io.Reader
}
func (noopResetter) Reset() error { return nil }
type backoff time.Duration
func (b *backoff) wait(ctx context.Context) error {
if *b == 0 {
*b = backoff(time.Millisecond)
}
select {
case <-time.After(time.Duration(*b)):
if time.Duration(*b) < time.Second*10 {
*b <<= 1
}
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func (b backoff) String() string {
return time.Duration(b).String()
}
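
A download sketch (not part of the vendored file; obj is assumed to come from Bucket.Object elsewhere in this package):

// download copies an object to a local file using four concurrent ranged
// requests, then checks the SHA1 recorded at upload time, when available.
func download(ctx context.Context, obj *b2.Object, dst string) error {
	r := obj.NewReader(ctx)
	r.ConcurrentDownloads = 4
	f, err := os.Create(dst)
	if err != nil {
		r.Close()
		return err
	}
	defer f.Close()
	if _, err := io.Copy(f, r); err != nil {
		r.Close()
		return err
	}
	if err := r.Close(); err != nil {
		return err
	}
	if err, ok := r.Verify(); ok && err != nil {
		return err // checksum mismatch
	}
	return nil
}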

vendor/github.com/kurin/blazer/b2/readerat.go generated vendored Normal file
@@ -0,0 +1,48 @@
// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"io"
"sync"
)
type readerAt struct {
rs io.ReadSeeker
mu sync.Mutex
}
func (r *readerAt) ReadAt(p []byte, off int64) (int, error) {
r.mu.Lock()
defer r.mu.Unlock()
// ReadAt is supposed to preserve the offset.
cur, err := r.rs.Seek(0, io.SeekCurrent)
if err != nil {
return 0, err
}
defer r.rs.Seek(cur, io.SeekStart)
if _, err := r.rs.Seek(off, io.SeekStart); err != nil {
return 0, err
}
return io.ReadFull(r.rs, p)
}
// enReaderAt wraps a ReadSeeker in a mutex to provide a ReaderAt. (How is
// this not in the io package?)
func enReaderAt(rs io.ReadSeeker) io.ReaderAt {
return &readerAt{rs: rs}
}
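
A small sketch of the guarantee (enReaderAt is unexported, so this lives inside package b2): concurrent ReadAt callers against the same ReadSeeker are serialized, and the stream's offset is restored after each read.

func readChunk(rs io.ReadSeeker, off int64, n int) ([]byte, error) {
	ra := enReaderAt(rs)
	buf := make([]byte, n)
	if _, err := ra.ReadAt(buf, off); err != nil {
		return nil, err
	}
	return buf, nil
}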

vendor/github.com/kurin/blazer/b2/writer.go generated vendored Normal file
@@ -0,0 +1,613 @@
// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package b2
import (
"context"
"errors"
"fmt"
"io"
"sync"
"sync/atomic"
"time"
"github.com/kurin/blazer/internal/blog"
)
// Writer writes data into Backblaze. It automatically switches to the large
// file API if the file exceeds ChunkSize bytes. Due to that and other
// Backblaze API details, there is a large buffer.
//
// Changes to public Writer attributes must be made before the first call to
// Write.
type Writer struct {
	// ConcurrentUploads is the number of different threads sending data concurrently
// to Backblaze for large files. This can increase performance greatly, as
// each thread will hit a different endpoint. However, there is a ChunkSize
// buffer for each thread. Values less than 1 are equivalent to 1.
ConcurrentUploads int
// Resume an upload. If true, and the upload is a large file, and a file of
// the same name was started but not finished, then assume that we are
// resuming that file, and don't upload duplicate chunks.
Resume bool
// ChunkSize is the size, in bytes, of each individual part, when writing
// large files, and also when determining whether to upload a file normally
	// or when to split it into parts. The default is 100M (1e8). The minimum
	// is 5M (5e6); values less than this are not rejected locally, but the
	// upload will fail. The
// maximum is 5GB (5e9).
ChunkSize int
// UseFileBuffer controls whether to use an in-memory buffer (the default) or
// scratch space on the file system. If this is true, b2 will save chunks in
// FileBufferDir.
UseFileBuffer bool
// FileBufferDir specifies the directory where scratch files are kept. If
// blank, os.TempDir() is used.
FileBufferDir string
contentType string
info map[string]string
csize int
ctx context.Context
cancel context.CancelFunc // cancels ctx
ctxf func() context.Context
errf func(error)
ready chan chunk
cdone chan struct{}
wg sync.WaitGroup
start sync.Once
once sync.Once
done sync.Once
file beLargeFileInterface
seen map[int]string
everStarted bool
newBuffer func() (writeBuffer, error)
o *Object
name string
cidx int
w writeBuffer
emux sync.RWMutex
err error
smux sync.RWMutex
smap map[int]*meteredReader
}
type chunk struct {
id int
buf writeBuffer
}
func (w *Writer) setErr(err error) {
if err == nil || err == io.EOF {
return
}
w.emux.Lock()
defer w.emux.Unlock()
if w.err != nil {
return
}
blog.V(1).Infof("error writing %s: %v", w.name, err)
w.err = err
w.cancel()
if w.ctxf == nil {
return
}
if w.errf == nil {
w.errf = func(error) {}
}
w.errf(w.file.cancel(w.ctxf()))
}
func (w *Writer) getErr() error {
w.emux.RLock()
defer w.emux.RUnlock()
return w.err
}
func (w *Writer) registerChunk(id int, r *meteredReader) {
w.smux.Lock()
w.smap[id] = r
w.smux.Unlock()
}
func (w *Writer) completeChunk(id int) {
w.smux.Lock()
w.smap[id] = nil
w.smux.Unlock()
}
var gid int32
func sleepCtx(ctx context.Context, d time.Duration) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(d):
return nil
}
}
func (w *Writer) thread() {
w.wg.Add(1)
go func() {
defer w.wg.Done()
id := atomic.AddInt32(&gid, 1)
fc, err := w.file.getUploadPartURL(w.ctx)
if err != nil {
w.setErr(err)
return
}
for {
var cnk chunk
select {
case cnk = <-w.ready:
case <-w.cdone:
return
}
if sha, ok := w.seen[cnk.id]; ok {
if sha != cnk.buf.Hash() {
w.setErr(errors.New("resumable upload was requested, but chunks don't match"))
return
}
cnk.buf.Close()
w.completeChunk(cnk.id)
blog.V(2).Infof("skipping chunk %d", cnk.id)
continue
}
blog.V(2).Infof("thread %d handling chunk %d", id, cnk.id)
r, err := cnk.buf.Reader()
if err != nil {
w.setErr(err)
return
}
mr := &meteredReader{r: r, size: cnk.buf.Len()}
w.registerChunk(cnk.id, mr)
sleep := time.Millisecond * 15
redo:
n, err := fc.uploadPart(w.ctx, mr, cnk.buf.Hash(), cnk.buf.Len(), cnk.id)
if n != cnk.buf.Len() || err != nil {
if w.o.b.r.reupload(err) {
				if err := sleepCtx(w.ctx, sleep); err != nil {
					w.setErr(err)
					w.completeChunk(cnk.id)
					cnk.buf.Close() // TODO: log error
					return
				}
sleep *= 2
if sleep > time.Second*15 {
sleep = time.Second * 15
}
blog.V(1).Infof("b2 writer: wrote %d of %d: error: %v; retrying", n, cnk.buf.Len(), err)
f, err := w.file.getUploadPartURL(w.ctx)
if err != nil {
w.setErr(err)
w.completeChunk(cnk.id)
cnk.buf.Close() // TODO: log error
return
}
fc = f
goto redo
}
w.setErr(err)
w.completeChunk(cnk.id)
cnk.buf.Close() // TODO: log error
return
}
w.completeChunk(cnk.id)
cnk.buf.Close() // TODO: log error
blog.V(2).Infof("chunk %d handled", cnk.id)
}
}()
}
func (w *Writer) init() {
w.start.Do(func() {
w.everStarted = true
w.smux.Lock()
w.smap = make(map[int]*meteredReader)
w.smux.Unlock()
w.o.b.c.addWriter(w)
w.csize = w.ChunkSize
if w.csize == 0 {
w.csize = 1e8
}
if w.newBuffer == nil {
w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil }
if w.UseFileBuffer {
w.newBuffer = func() (writeBuffer, error) { return newFileBuffer(w.FileBufferDir) }
}
}
v, err := w.newBuffer()
if err != nil {
w.setErr(err)
return
}
w.w = v
})
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
w.init()
if err := w.getErr(); err != nil {
return 0, err
}
left := w.csize - w.w.Len()
if len(p) < left {
return w.w.Write(p)
}
i, err := w.w.Write(p[:left])
if err != nil {
w.setErr(err)
return i, err
}
if err := w.sendChunk(); err != nil {
w.setErr(err)
return i, w.getErr()
}
k, err := w.Write(p[left:])
if err != nil {
w.setErr(err)
}
return i + k, err
}
func (w *Writer) getUploadURL(ctx context.Context) (beURLInterface, error) {
u := w.o.b.urlPool.get()
if u == nil {
		return w.o.b.b.getUploadURL(ctx)
}
return u, nil
}
func (w *Writer) simpleWriteFile() error {
ue, err := w.getUploadURL(w.ctx)
if err != nil {
return err
}
	// This defer needs to be in a func() so that we return to the pool
	// whatever value ue holds at function exit.
defer func() { w.o.b.urlPool.put(ue) }()
sha1 := w.w.Hash()
ctype := w.contentType
if ctype == "" {
ctype = "application/octet-stream"
}
r, err := w.w.Reader()
if err != nil {
return err
}
mr := &meteredReader{r: r, size: w.w.Len()}
w.registerChunk(1, mr)
defer w.completeChunk(1)
redo:
f, err := ue.uploadFile(w.ctx, mr, int(w.w.Len()), w.name, ctype, sha1, w.info)
if err != nil {
if w.o.b.r.reupload(err) {
blog.V(2).Infof("b2 writer: %v; retrying", err)
u, err := w.o.b.b.getUploadURL(w.ctx)
if err != nil {
return err
}
ue = u
goto redo
}
return err
}
w.o.f = f
return nil
}
func (w *Writer) getLargeFile() (beLargeFileInterface, error) {
if !w.Resume {
ctype := w.contentType
if ctype == "" {
ctype = "application/octet-stream"
}
return w.o.b.b.startLargeFile(w.ctx, w.name, ctype, w.info)
}
var got bool
iter := w.o.b.List(w.ctx, ListPrefix(w.name), ListUnfinished())
var fi beFileInterface
for iter.Next() {
obj := iter.Object()
if obj.Name() == w.name {
got = true
fi = obj.f
}
}
if iter.Err() != nil {
return nil, iter.Err()
}
if !got {
w.Resume = false
return w.getLargeFile()
}
next := 1
seen := make(map[int]string)
var size int64
for {
parts, n, err := fi.listParts(w.ctx, next, 100)
if err != nil {
return nil, err
}
next = n
for _, p := range parts {
seen[p.number()] = p.sha1()
size += p.size()
}
if len(parts) == 0 {
break
}
if next == 0 {
break
}
}
w.seen = make(map[int]string) // copy the map
for id, sha := range seen {
w.seen[id] = sha
}
return fi.compileParts(size, seen), nil
}
func (w *Writer) sendChunk() error {
var err error
w.once.Do(func() {
lf, e := w.getLargeFile()
if e != nil {
err = e
return
}
w.file = lf
w.ready = make(chan chunk)
w.cdone = make(chan struct{})
if w.ConcurrentUploads < 1 {
w.ConcurrentUploads = 1
}
for i := 0; i < w.ConcurrentUploads; i++ {
w.thread()
}
})
if err != nil {
return err
}
select {
case <-w.cdone:
return nil
case w.ready <- chunk{
id: w.cidx + 1,
buf: w.w,
}:
case <-w.ctx.Done():
return w.ctx.Err()
}
w.cidx++
v, err := w.newBuffer()
if err != nil {
return err
}
w.w = v
return nil
}
// ReadFrom reads all of r into w, returning the first error or no error if r
// returns io.EOF. If r is also an io.Seeker, ReadFrom will stream r directly
// over the wire instead of buffering it locally. This reduces memory usage.
//
// Do not issue multiple calls to ReadFrom, or mix ReadFrom and Write. If you
// have multiple readers you want to concatenate into the same B2 object, use
// an io.MultiReader.
//
// Note that io.Copy will automatically choose to use ReadFrom.
//
// ReadFrom currently doesn't handle w.Resume; if w.Resume is true, ReadFrom
// will act as if r is not an io.Seeker.
func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
rs, ok := r.(io.ReadSeeker)
if !ok || w.Resume {
return copyContext(w.ctx, w, r)
}
blog.V(2).Info("streaming without buffer")
size, err := rs.Seek(0, io.SeekEnd)
if err != nil {
return 0, err
}
var ra io.ReaderAt
if rat, ok := r.(io.ReaderAt); ok {
ra = rat
} else {
ra = enReaderAt(rs)
}
var offset int64
var wrote int64
w.newBuffer = func() (writeBuffer, error) {
left := size - offset
if left <= 0 {
// We're done sending real chunks; send empty chunks from now on so that
// Close() works.
w.newBuffer = func() (writeBuffer, error) { return newMemoryBuffer(), nil }
w.w = newMemoryBuffer()
return nil, io.EOF
}
csize := int64(w.csize)
if left < csize {
csize = left
}
nb := newNonBuffer(ra, offset, csize)
wrote += csize // TODO: this is kind of a total lie
offset += csize
return nb, nil
}
w.init()
if size < int64(w.csize) {
// the magic happens on w.Close()
return size, nil
}
for {
if err := w.sendChunk(); err != nil {
if err != io.EOF {
return wrote, err
}
return wrote, nil
}
}
}
// Close satisfies the io.Closer interface. It is critical to check the return
// value of Close for all writers.
func (w *Writer) Close() error {
w.done.Do(func() {
if !w.everStarted {
w.init()
w.setErr(w.simpleWriteFile())
return
}
defer w.o.b.c.removeWriter(w)
defer func() {
if err := w.w.Close(); err != nil {
// this is non-fatal, but alarming
blog.V(1).Infof("close %s: %v", w.name, err)
}
}()
if w.cidx == 0 {
w.setErr(w.simpleWriteFile())
return
}
if w.w.Len() > 0 {
if err := w.sendChunk(); err != nil {
w.setErr(err)
return
}
}
// See https://github.com/kurin/blazer/issues/60 for why we use a special
// channel for this.
close(w.cdone)
w.wg.Wait()
f, err := w.file.finishLargeFile(w.ctx)
if err != nil {
w.setErr(err)
return
}
w.o.f = f
})
return w.getErr()
}
// WithAttrs sets the writable attributes of the resulting file to given
// values. WithAttrs must be called before the first call to Write.
//
// Deprecated: Use WithAttrsOption instead.
func (w *Writer) WithAttrs(attrs *Attrs) *Writer {
w.contentType = attrs.ContentType
w.info = make(map[string]string)
for k, v := range attrs.Info {
w.info[k] = v
}
if len(w.info) < 10 && attrs.SHA1 != "" {
w.info["large_file_sha1"] = attrs.SHA1
}
if len(w.info) < 10 && !attrs.LastModified.IsZero() {
w.info["src_last_modified_millis"] = fmt.Sprintf("%d", attrs.LastModified.UnixNano()/1e6)
}
return w
}
// A WriterOption sets Writer-specific behavior.
type WriterOption func(*Writer)
// WithAttrsOption attaches the given Attrs to the writer.
func WithAttrsOption(attrs *Attrs) WriterOption {
return func(w *Writer) {
w.WithAttrs(attrs)
}
}
// WithCancelOnError requests the writer, if it has started a large file
// upload, to call b2_cancel_large_file on any permanent error. It calls ctxf
// to obtain a context with which to cancel the file; this is to allow callers
// to set specific timeouts. If errf is non-nil, then it is called with the
// (possibly nil) output of b2_cancel_large_file.
func WithCancelOnError(ctxf func() context.Context, errf func(error)) WriterOption {
return func(w *Writer) {
w.ctxf = ctxf
w.errf = errf
}
}
// DefaultWriterOptions returns a ClientOption that will apply the given
// WriterOptions to every Writer. These options can be overridden by passing
// new options to NewWriter.
func DefaultWriterOptions(opts ...WriterOption) ClientOption {
return func(c *clientOptions) {
c.writerOpts = opts
}
}
func (w *Writer) status() *WriterStatus {
w.smux.RLock()
defer w.smux.RUnlock()
ws := &WriterStatus{
Progress: make([]float64, len(w.smap)),
}
for i := 1; i <= len(w.smap); i++ {
ws.Progress[i-1] = w.smap[i].done()
}
return ws
}
type meteredReader struct {
read int64
size int
r readResetter
mux sync.Mutex
}
func (mr *meteredReader) Read(p []byte) (int, error) {
mr.mux.Lock()
defer mr.mux.Unlock()
n, err := mr.r.Read(p)
mr.read += int64(n)
return n, err
}
func (mr *meteredReader) Reset() error {
mr.mux.Lock()
defer mr.mux.Unlock()
mr.read = 0
return mr.r.Reset()
}
func (mr *meteredReader) done() float64 {
if mr == nil {
return 1
}
read := float64(atomic.LoadInt64(&mr.read))
return read / float64(mr.size)
}
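
An upload sketch (not part of the vendored file; obj is assumed to come from Bucket.Object). Because *os.File is an io.ReadSeeker, io.Copy routes through ReadFrom above and streams parts without buffering them locally:

func upload(ctx context.Context, obj *b2.Object, src string) error {
	f, err := os.Open(src)
	if err != nil {
		return err
	}
	defer f.Close()
	w := obj.NewWriter(ctx)
	w.ConcurrentUploads = 4 // four parts in flight
	w.ChunkSize = 50e6      // 50 MB parts; the minimum is 5 MB
	if _, err := io.Copy(w, f); err != nil {
		w.Close()
		return err
	}
	return w.Close() // the object does not exist until Close returns nil
}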

vendor/github.com/kurin/blazer/base/base.go generated vendored Normal file
File diff suppressed because it is too large (1298 lines added)

vendor/github.com/kurin/blazer/base/strings.go generated vendored Normal file
@@ -0,0 +1,28 @@
// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package base
import (
"net/url"
"strings"
)
func escape(s string) string {
return strings.Replace(url.QueryEscape(s), "%2F", "/", -1)
}
func unescape(s string) (string, error) {
return url.QueryUnescape(s)
}
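
For intuition, the round-trip on a typical object name (slashes survive the escape; spaces become "+"):

// escape("photos/summer 2017.jpg")   == "photos/summer+2017.jpg"
// unescape("photos/summer+2017.jpg") == "photos/summer 2017.jpg", nil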

vendor/github.com/kurin/blazer/internal/b2assets/b2assets.go generated vendored Normal file
@@ -0,0 +1,237 @@
// Code generated by go-bindata.
// sources:
// data/status.html
// DO NOT EDIT!
package b2assets
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
if clErr != nil {
		return nil, clErr
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _dataStatusHtml = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xd4\x93\x41\x6f\xe3\x20\x10\x85\xef\xf9\x15\xb3\x56\x8e\x51\x90\x73\x5c\x4d\xb8\xec\xee\x79\xa3\xaa\x52\xd5\x23\x36\xa3\x60\x09\x43\x84\x71\x9a\xc8\xe2\xbf\x57\x18\x83\xa3\xb6\x87\x5e\x7b\xf2\x98\xf7\xe6\xf1\xbe\x03\xf8\xeb\xef\xff\x3f\xcf\xaf\xa7\x7f\xa0\x7c\xaf\xf9\x06\xf3\x87\x84\xe4\x1b\x00\xf4\x9d\xd7\xc4\x9b\x03\xb4\xba\x23\xe3\x61\xf0\xc2\x8f\x03\xb2\x74\xbe\x41\x96\x9c\xd8\x58\x79\x8f\x0b\xd3\xb4\xed\xc9\x2b\x2b\x07\xf8\x7d\x84\x3c\xee\x43\x48\x9a\x1c\x9d\xf0\x9d\x35\xb3\xba\xfe\x14\xdd\x8b\x46\x53\xd4\xd2\x90\xce\x51\xd5\xbc\xb5\xa3\xf1\xd0\xdc\xa1\xb5\x92\x90\xa9\x3a\xb5\x8b\xae\x38\xc5\x65\x27\xcc\x99\x60\xb9\x3e\x66\xe4\x26\x73\x48\x74\xbb\x64\x8d\xa3\xe4\xa5\x69\x08\xc8\xbc\xcc\x52\xc9\xc9\xed\xe6\xa4\x52\x75\xc9\x5a\x43\x3a\x23\xe9\x06\x4b\xf1\x7c\x79\xf1\x7f\xcc\x26\x23\x73\x1b\x96\xeb\xac\xa7\xc8\x0a\x50\x64\x1e\x2f\xda\x0a\x39\x64\xda\x87\x6e\x46\xf4\xb4\x83\xed\x55\xe8\xd8\x6e\xff\xe2\x3a\x4f\xae\x70\xaa\x03\x9f\xa6\x64\x82\x58\x40\x1d\x3e\xc1\x75\x72\x07\xdb\x8b\xb3\xe7\x99\xee\x2a\xf4\xfe\xe4\xec\xd9\xd1\xb0\x02\x46\xb4\x36\x3a\x43\x00\xbc\x2c\x2a\x5c\x85\x1e\xe9\x58\x4d\xd3\xbc\x1d\x42\x05\xbd\xb8\x1d\xab\xba\xe2\xc8\xb2\x89\x63\xe3\x80\x7d\x05\xfd\x80\xaa\x6a\x2e\xed\x9b\xf9\x26\xe1\x13\x09\xf9\xa3\x08\x91\xa5\x17\x81\x2c\xbd\xa8\xf7\x00\x00\x00\xff\xff\xd4\xf0\x90\xb4\x69\x03\x00\x00")
func dataStatusHtmlBytes() ([]byte, error) {
return bindataRead(
_dataStatusHtml,
"data/status.html",
)
}
func dataStatusHtml() (*asset, error) {
bytes, err := dataStatusHtmlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "data/status.html", size: 873, mode: os.FileMode(436), modTime: time.Unix(1520578750, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"data/status.html": dataStatusHtml,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
		canonicalName := strings.Replace(name, "\\", "/", -1)
		pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"data": &bintree{nil, map[string]*bintree{
"status.html": &bintree{dataStatusHtml, map[string]*bintree{}},
}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}

@@ -0,0 +1,18 @@
// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package b2assets contains data required by other libraries in blazer.
package b2assets
//go:generate go-bindata -pkg $GOPACKAGE -o b2assets.go data/

@@ -0,0 +1,281 @@
// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package b2types implements internal types common to the B2 API.
package b2types
// You know what would be amazing? If I could autogen this from like a JSON
// file. Wouldn't that be amazing? That would be amazing.
const (
V1api = "/b2api/v1/"
)
type ErrorMessage struct {
Status int `json:"status"`
Code string `json:"code"`
Msg string `json:"message"`
}
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"`
AuthToken string `json:"authorizationToken"`
URI string `json:"apiUrl"`
DownloadURI string `json:"downloadUrl"`
MinPartSize int `json:"minimumPartSize"`
PartSize int `json:"recommendedPartSize"`
AbsMinPartSize int `json:"absoluteMinimumPartSize"`
Allowed Allowance `json:"allowed"`
}
type Allowance struct {
Capabilities []string `json:"capabilities"`
Bucket string `json:"bucketId"`
Prefix string `json:"namePrefix"`
}
type LifecycleRule struct {
DaysHiddenUntilDeleted int `json:"daysFromHidingToDeleting,omitempty"`
DaysNewUntilHidden int `json:"daysFromUploadingToHiding,omitempty"`
Prefix string `json:"fileNamePrefix"`
}
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
Info map[string]string `json:"bucketInfo"`
LifecycleRules []LifecycleRule `json:"lifecycleRules"`
}
type CreateBucketResponse struct {
BucketID string `json:"bucketId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
Info map[string]string `json:"bucketInfo"`
LifecycleRules []LifecycleRule `json:"lifecycleRules"`
Revision int `json:"revision"`
}
type DeleteBucketRequest struct {
AccountID string `json:"accountId"`
BucketID string `json:"bucketId"`
}
type ListBucketsRequest struct {
AccountID string `json:"accountId"`
Bucket string `json:"bucketId,omitempty"`
}
type ListBucketsResponse struct {
Buckets []CreateBucketResponse `json:"buckets"`
}
type UpdateBucketRequest struct {
AccountID string `json:"accountId"`
BucketID string `json:"bucketId"`
Type string `json:"bucketType,omitempty"`
Info map[string]string `json:"bucketInfo,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
IfRevisionIs int `json:"ifRevisionIs,omitempty"`
}
type UpdateBucketResponse CreateBucketResponse
type GetUploadURLRequest struct {
BucketID string `json:"bucketId"`
}
type GetUploadURLResponse struct {
URI string `json:"uploadUrl"`
Token string `json:"authorizationToken"`
}
type UploadFileResponse GetFileInfoResponse
type DeleteFileVersionRequest struct {
Name string `json:"fileName"`
FileID string `json:"fileId"`
}
type StartLargeFileRequest struct {
BucketID string `json:"bucketId"`
Name string `json:"fileName"`
ContentType string `json:"contentType"`
Info map[string]string `json:"fileInfo,omitempty"`
}
type StartLargeFileResponse struct {
ID string `json:"fileId"`
}
type CancelLargeFileRequest struct {
ID string `json:"fileId"`
}
type ListPartsRequest struct {
ID string `json:"fileId"`
Start int `json:"startPartNumber"`
Count int `json:"maxPartCount"`
}
type ListPartsResponse struct {
Next int `json:"nextPartNumber"`
Parts []struct {
ID string `json:"fileId"`
Number int `json:"partNumber"`
SHA1 string `json:"contentSha1"`
Size int64 `json:"contentLength"`
} `json:"parts"`
}
type getUploadPartURLRequest struct {
ID string `json:"fileId"`
}
type getUploadPartURLResponse struct {
URL string `json:"uploadUrl"`
Token string `json:"authorizationToken"`
}
type FinishLargeFileRequest struct {
ID string `json:"fileId"`
Hashes []string `json:"partSha1Array"`
}
type FinishLargeFileResponse struct {
Name string `json:"fileName"`
FileID string `json:"fileId"`
Timestamp int64 `json:"uploadTimestamp"`
Action string `json:"action"`
}
type ListFileNamesRequest struct {
BucketID string `json:"bucketId"`
Count int `json:"maxFileCount"`
Continuation string `json:"startFileName,omitempty"`
Prefix string `json:"prefix,omitempty"`
Delimiter string `json:"delimiter,omitempty"`
}
type ListFileNamesResponse struct {
Continuation string `json:"nextFileName"`
Files []GetFileInfoResponse `json:"files"`
}
type ListFileVersionsRequest struct {
BucketID string `json:"bucketId"`
Count int `json:"maxFileCount"`
StartName string `json:"startFileName,omitempty"`
StartID string `json:"startFileId,omitempty"`
Prefix string `json:"prefix,omitempty"`
Delimiter string `json:"delimiter,omitempty"`
}
type ListFileVersionsResponse struct {
NextName string `json:"nextFileName"`
NextID string `json:"nextFileId"`
Files []GetFileInfoResponse `json:"files"`
}
type HideFileRequest struct {
BucketID string `json:"bucketId"`
File string `json:"fileName"`
}
type HideFileResponse struct {
ID string `json:"fileId"`
Timestamp int64 `json:"uploadTimestamp"`
Action string `json:"action"`
}
type GetFileInfoRequest struct {
ID string `json:"fileId"`
}
type GetFileInfoResponse struct {
FileID string `json:"fileId,omitempty"`
Name string `json:"fileName,omitempty"`
AccountID string `json:"accountId,omitempty"`
BucketID string `json:"bucketId,omitempty"`
Size int64 `json:"contentLength,omitempty"`
SHA1 string `json:"contentSha1,omitempty"`
ContentType string `json:"contentType,omitempty"`
Info map[string]string `json:"fileInfo,omitempty"`
Action string `json:"action,omitempty"`
Timestamp int64 `json:"uploadTimestamp,omitempty"`
}
type GetDownloadAuthorizationRequest struct {
BucketID string `json:"bucketId"`
Prefix string `json:"fileNamePrefix"`
Valid int `json:"validDurationInSeconds"`
ContentDisposition string `json:"b2ContentDisposition,omitempty"`
}
type GetDownloadAuthorizationResponse struct {
BucketID string `json:"bucketId"`
Prefix string `json:"fileNamePrefix"`
Token string `json:"authorizationToken"`
}
type ListUnfinishedLargeFilesRequest struct {
BucketID string `json:"bucketId"`
Continuation string `json:"startFileId,omitempty"`
Count int `json:"maxFileCount,omitempty"`
}
type ListUnfinishedLargeFilesResponse struct {
Files []GetFileInfoResponse `json:"files"`
Continuation string `json:"nextFileId"`
}
type CreateKeyRequest struct {
AccountID string `json:"accountId"`
Capabilities []string `json:"capabilities"`
Name string `json:"keyName"`
Valid int `json:"validDurationInSeconds,omitempty"`
BucketID string `json:"bucketId,omitempty"`
Prefix string `json:"namePrefix,omitempty"`
}
type Key struct {
ID string `json:"applicationKeyId"`
Secret string `json:"applicationKey"`
AccountID string `json:"accountId"`
Capabilities []string `json:"capabilities"`
Name string `json:"keyName"`
Expires int64 `json:"expirationTimestamp"`
BucketID string `json:"bucketId"`
Prefix string `json:"namePrefix"`
}
type CreateKeyResponse Key
type DeleteKeyRequest struct {
KeyID string `json:"applicationKeyId"`
}
type DeleteKeyResponse Key
type ListKeysRequest struct {
AccountID string `json:"accountId"`
Max int `json:"maxKeyCount,omitempty"`
Next string `json:"startApplicationKeyId,omitempty"`
}
type ListKeysResponse struct {
Keys []Key `json:"keys"`
Next string `json:"nextApplicationKeyId"`
}
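
These structs marshal directly into the JSON bodies the B2 HTTP API expects; a sketch with a hypothetical bucket ID (the package is internal, so this lives inside blazer):

func exampleListBody() ([]byte, error) {
	req := b2types.ListFileNamesRequest{
		BucketID: "bucketId123",
		Count:    100,
		Prefix:   "photos/",
	}
	// Yields {"bucketId":"bucketId123","maxFileCount":100,"prefix":"photos/"};
	// omitempty keeps the unset continuation and delimiter fields off the wire.
	return json.Marshal(req)
}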

vendor/github.com/kurin/blazer/internal/blog/blog.go generated vendored Normal file
@@ -0,0 +1,54 @@
// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package blog implements a private logger, in the manner of glog, without
// polluting the flag namespace or leaving files all over /tmp.
//
// It has almost no features, and a bunch of global state.
package blog
import (
"log"
"os"
"strconv"
)
var level int32
type Verbose bool
func init() {
lvl := os.Getenv("B2_LOG_LEVEL")
i, err := strconv.ParseInt(lvl, 10, 32)
if err != nil {
return
}
level = int32(i)
}
func (v Verbose) Info(a ...interface{}) {
if v {
log.Print(a...)
}
}
func (v Verbose) Infof(format string, a ...interface{}) {
if v {
log.Printf(format, a...)
}
}
func V(target int32) Verbose {
return Verbose(target <= level)
}
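
A usage sketch: verbosity comes from B2_LOG_LEVEL, which init reads once, so it must be set before this package is loaded.

// With B2_LOG_LEVEL=2 in the environment:
blog.V(1).Infof("retrying chunk %d", 7) // printed, since 1 <= 2
blog.V(3).Info("very chatty detail")    // suppressed, since 3 > 2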

vendor/github.com/kurin/blazer/x/window/window.go generated vendored Normal file
@@ -0,0 +1,162 @@
// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package window provides a type for efficiently recording events as they
// occur over a given span of time. Events added to the window will remain
// until the time expires.
package window
import (
"sync"
"time"
)
// A Window efficiently records events that have occurred over a span of time
// extending from some fixed interval ago to now. Events that pass beyond this
// horizon are discarded.
type Window struct {
mu sync.Mutex
events []interface{}
res time.Duration
last time.Time
reduce Reducer
forever bool
e interface{}
}
// A Reducer should take two values from the window and combine them into a
// third value that will be stored in the window. The values i or j may be
// nil. The underlying types for both arguments and the output should be
// identical.
//
// If the reduced value is any kind of slice or list, then data usage will
// grow linearly with the number of events added to the window.
//
// Reducer will be called on its own output: Reducer(Reducer(x, y), z).
type Reducer func(i, j interface{}) interface{}
// New returns an initialized window for events over the given duration at the
// given resolution. Windows with tight resolution (i.e., small values for
// that argument) will be more accurate, at the cost of some memory.
//
// A size of 0 means "forever"; old events will never be removed.
func New(size, resolution time.Duration, r Reducer) *Window {
if size > 0 {
return &Window{
res: resolution,
events: make([]interface{}, size/resolution),
reduce: r,
}
}
return &Window{
forever: true,
reduce: r,
}
}
func (w *Window) bucket(now time.Time) int {
nanos := now.UnixNano()
abs := nanos / int64(w.res)
return int(abs) % len(w.events)
}
// sweep keeps the window valid. It needs to be called from every method that
// views or updates the window, and the caller needs to hold the mutex.
func (w *Window) sweep(now time.Time) {
if w.forever {
return
}
defer func() {
w.last = now
}()
// This compares now and w.last's monotonic clocks.
diff := now.Sub(w.last)
if diff < 0 {
// time went backwards somehow; zero events and return
for i := range w.events {
w.events[i] = nil
}
return
}
last := now.Add(-diff)
b := w.bucket(now)
p := w.bucket(last)
if b == p && diff <= w.res {
// We're in the same bucket as the previous sweep, so all buckets are
// valid.
return
}
if diff > w.res*time.Duration(len(w.events)) {
// We've gone longer than this window measures since the last sweep, just
// zero the thing and have done.
for i := range w.events {
w.events[i] = nil
}
return
}
// Expire all invalid buckets. This means buckets not seen since the
// previous sweep and now, including the current bucket but not including the
// previous bucket.
	old := last.UnixNano() / int64(w.res)
	cur := now.UnixNano() / int64(w.res)
	for i := old + 1; i <= cur; i++ {
b := int(i) % len(w.events)
w.events[b] = nil
}
}
// Insert adds the given event.
func (w *Window) Insert(e interface{}) {
w.insertAt(time.Now(), e)
}
func (w *Window) insertAt(t time.Time, e interface{}) {
w.mu.Lock()
defer w.mu.Unlock()
if w.forever {
w.e = w.reduce(w.e, e)
return
}
w.sweep(t)
w.events[w.bucket(t)] = w.reduce(w.events[w.bucket(t)], e)
}
// Reduce runs the window's reducer over the valid values and returns the
// result.
func (w *Window) Reduce() interface{} {
return w.reducedAt(time.Now())
}
func (w *Window) reducedAt(t time.Time) interface{} {
w.mu.Lock()
defer w.mu.Unlock()
if w.forever {
return w.e
}
w.sweep(t)
var n interface{}
for i := range w.events {
n = w.reduce(n, w.events[i])
}
return n
}
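
A sketch in the same spirit as the methodCounter built on this package: a sliding one-minute event counter whose reducer sums int totals (nil inputs type-assert to the zero value).

func newEventCounter() *window.Window {
	sum := func(i, j interface{}) interface{} {
		a, _ := i.(int)
		b, _ := j.(int)
		return a + b
	}
	return window.New(time.Minute, time.Second, sum)
}

// w := newEventCounter()
// w.Insert(1)               // record one event
// total := w.Reduce().(int) // events observed over the last minute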