author    hwajeong.son <hwajeong.son@samsung.com>  2018-08-20 13:30:55 +0900
committer hwajeong.son <hwajeong.son@samsung.com>  2018-08-20 13:30:55 +0900
commit    0b51891e5977b87f986f4db2cbbe09295cfdbedc (patch)
tree      c35ac732cb1dffccee5a32131431f753481077c2 /vendor/cloud.google.com
parent    eea0e89806b2cf59af3dccabc67014bd19b91b82 (diff)
Signed-off-by: hwajeong.son <hwajeong.son@samsung.com>
Diffstat (limited to 'vendor/cloud.google.com')
-rw-r--r--  vendor/cloud.google.com/go/LICENSE                           202
-rw-r--r--  vendor/cloud.google.com/go/README.md                         452
-rw-r--r--  vendor/cloud.google.com/go/compute/metadata/metadata.go      438
-rw-r--r--  vendor/cloud.google.com/go/internal/cloud.go                  64
-rw-r--r--  vendor/cloud.google.com/go/internal/retry.go                  55
-rw-r--r--  vendor/cloud.google.com/go/logging/apiv2/README.md            11
-rw-r--r--  vendor/cloud.google.com/go/logging/apiv2/config_client.go    300
-rw-r--r--  vendor/cloud.google.com/go/logging/apiv2/doc.go               26
-rw-r--r--  vendor/cloud.google.com/go/logging/apiv2/logging_client.go   359
-rw-r--r--  vendor/cloud.google.com/go/logging/apiv2/metrics_client.go   299
-rw-r--r--  vendor/cloud.google.com/go/logging/doc.go                     89
-rw-r--r--  vendor/cloud.google.com/go/logging/internal/common.go         30
-rw-r--r--  vendor/cloud.google.com/go/logging/logging.go                 674
13 files changed, 2999 insertions(+), 0 deletions(-)
diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE
new file mode 100644
index 0000000..a4c5efd
--- /dev/null
+++ b/vendor/cloud.google.com/go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2014 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md
new file mode 100644
index 0000000..9b91f52
--- /dev/null
+++ b/vendor/cloud.google.com/go/README.md
@@ -0,0 +1,452 @@
+# Google Cloud for Go
+
+[![Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go)
+[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go)
+
+``` go
+import "cloud.google.com/go"
+```
+
+Go packages for Google Cloud Platform services.
+
+To install the packages on your system,
+
+```
+$ go get -u cloud.google.com/go/...
+```
+
+**NOTE:** These packages are under development, and may occasionally make
+backwards-incompatible changes.
+
+**NOTE:** The GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
+
+## News
+
+_December 5, 2016_
+
+More changes to BigQuery:
+
+* The `ValueList` type was removed. It is no longer necessary. Instead of
+ ```go
+ var v ValueList
+ ... it.Next(&v) ..
+ ```
+ use
+
+ ```go
+ var v []Value
+ ... it.Next(&v) ...
+ ```
+
+* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
+ `ValueList` would append to the slice. Now each call resets the size to zero first.
+
+* Schema inference will infer the SQL type BYTES for a struct field of
+ type []byte. Previously it inferred STRING.
+
+* The types `uint`, `uint64` and `uintptr` are no longer supported in schema
+ inference. BigQuery's integer type is INT64, and those types may hold values
+ that are not correctly represented in a 64-bit signed integer.
+
+* The SQL types DATE, TIME and DATETIME are now supported. They correspond to
+ the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
+ package.
+
+_November 17, 2016_
+
+Change to BigQuery: values from INTEGER columns will now be returned as int64,
+not int. This will avoid errors arising from large values on 32-bit systems.
+
+_November 8, 2016_
+
+New datastore feature: datastore now encodes your nested Go structs as Entity values,
+instead of a flattened list of the embedded struct's fields.
+This means that you may now have twice-nested slices, e.g.
+```go
+type State struct {
+ Cities []struct{
+ Populations []int
+ }
+}
+```
+
+See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
+more details.
+
+_November 8, 2016_
+
+Breaking changes to datastore: contexts no longer hold namespaces; instead you
+must set a key's namespace explicitly. Also, key functions have been changed
+and renamed.
+
+* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
+ ```go
+ q := datastore.NewQuery("Kind").Namespace("ns")
+ ```
+
+* All the fields of Key are exported. That means you can construct any Key with a struct literal:
+ ```go
+ k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
+ ```
+
+* As a result of the above, the Key methods Kind, ID, Name, Parent, SetParent and Namespace have been removed.
+
+* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
+ ```go
+ NewIncompleteKey(ctx, kind, parent)
+ ```
+ with
+ ```go
+ IncompleteKey(kind, parent)
+ ```
+ and if you do use namespaces, make sure you set the namespace on the returned key.
+
+* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
+ ```go
+ NewKey(ctx, kind, name, 0, parent)
+ NewKey(ctx, kind, "", id, parent)
+ ```
+ with
+ ```go
+ NameKey(kind, name, parent)
+ IDKey(kind, id, parent)
+ ```
+ and if you do use namespaces, make sure you set the namespace on the returned key.
+
+* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
+
+* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
+
+See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
+more details.
+
+_October 27, 2016_
+
+Breaking change to bigquery: `NewGCSReference` is now a function,
+not a method on `Client`.
+
+New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
+loading data into a table from a file or any `io.Reader`.
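+
+A rough sketch of the new loading path (assuming an existing `table` handle and
+a local CSV file named `data.csv`):
+
+```go
+f, err := os.Open("data.csv")
+if err != nil {
+    // TODO: Handle error.
+}
+source := bigquery.NewReaderSource(f)
+loader := table.LoaderFrom(source)
+job, err := loader.Run(ctx)
+if err != nil {
+    // TODO: Handle error.
+}
+_ = job // Wait on or poll the job if needed.
+```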
+
+_October 21, 2016_
+
+Breaking change to pubsub: removed `pubsub.Done`.
+
+Use `iterator.Done` instead, where `iterator` is the package
+`google.golang.org/api/iterator`.
+
+
+[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
+
+## Supported APIs
+
+Google API | Status | Package
+-------------------------------|--------------|-----------------------------------------------------------
+[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
+[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref]
+[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
+[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
+[Logging][cloud-logging] | beta | [`cloud.google.com/go/logging`][cloud-logging-ref]
+[Pub/Sub][cloud-pubsub] | experimental | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
+[Vision][cloud-vision] | experimental | [`cloud.google.com/go/vision`][cloud-vision-ref]
+[Language][cloud-language] | experimental | [`cloud.google.com/go/language/apiv1beta1`][cloud-language-ref]
+[Speech][cloud-speech] | experimental | [`cloud.google.com/go/speech/apiv1beta1`][cloud-speech-ref]
+
+
+> **Experimental status**: the API is still being actively developed. As a
+> result, it might change in backward-incompatible ways and is not recommended
+> for production use.
+>
+> **Beta status**: the API is largely complete, but still has outstanding
+> features and bugs to be addressed. There may be minor backwards-incompatible
+> changes where necessary.
+>
+> **Stable status**: the API is mature and ready for production use. We will
+> continue addressing bugs and feature requests.
+
+Documentation and examples are available at
+https://godoc.org/cloud.google.com/go
+
+Visit or join the
+[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
+for updates on these packages.
+
+## Go Versions Supported
+
+We support the two most recent major versions of Go. If Google App Engine uses
+an older version, we support that as well. You can see which versions are
+currently supported by looking at the lines following `go:` in
+[`.travis.yml`](.travis.yml).
+
+## Authorization
+
+By default, each API will use [Google Application Default Credentials][default-creds]
+for authorization credentials used in calling the API endpoints. This will allow your
+application to run in many environments without requiring explicit configuration.
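+
+For example, creating a client with no explicit credential option picks up the
+default credentials (a minimal sketch; `"my-bucket"` is only a placeholder):
+
+```go
+ctx := context.Background()
+// No option is passed, so Application Default Credentials are used.
+client, err := storage.NewClient(ctx)
+if err != nil {
+    log.Fatal(err)
+}
+defer client.Close()
+bucket := client.Bucket("my-bucket") // placeholder bucket name
+_ = bucket
+```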
+
+To authorize using a
+[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
+pass
+[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
+to the `NewClient` function of the desired package. For example:
+
+```go
+client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
+```
+
+You can exert more control over authorization by using the
+[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
+create an `oauth2.TokenSource`. Then pass
+[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
+to the `NewClient` function:
+```go
+tokenSource := ...
+client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
+```
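+
+One way to construct such a token source (a sketch, assuming the
+`golang.org/x/oauth2/google` helper package and a read-only Cloud Storage scope):
+
+```go
+tokenSource, err := google.DefaultTokenSource(ctx,
+    "https://www.googleapis.com/auth/devstorage.read_only")
+if err != nil {
+    log.Fatal(err)
+}
+client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
+```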
+
+## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore)
+
+- [About Cloud Datastore][cloud-datastore]
+- [Activating the API for your project][cloud-datastore-activation]
+- [API documentation][cloud-datastore-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
+- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)
+
+### Example Usage
+
+First create a `datastore.Client` to use throughout your application:
+
+```go
+client, err := datastore.NewClient(ctx, "my-project-id")
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+Then use that client to interact with the API:
+
+```go
+type Post struct {
+ Title string
+ Body string `datastore:",noindex"`
+ PublishedAt time.Time
+}
+keys := []*datastore.Key{
+ datastore.NewKey(ctx, "Post", "post1", 0, nil),
+ datastore.NewKey(ctx, "Post", "post2", 0, nil),
+}
+posts := []*Post{
+ {Title: "Post 1", Body: "...", PublishedAt: time.Now()},
+ {Title: "Post 2", Body: "...", PublishedAt: time.Now()},
+}
+if _, err := client.PutMulti(ctx, keys, posts); err != nil {
+ log.Fatal(err)
+}
+```
+
+## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)
+
+- [About Cloud Storage][cloud-storage]
+- [API documentation][cloud-storage-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)
+
+### Example Usage
+
+First create a `storage.Client` to use throughout your application:
+
+```go
+client, err := storage.NewClient(ctx)
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+```go
+// Read the object1 from bucket.
+rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
+if err != nil {
+ log.Fatal(err)
+}
+defer rc.Close()
+body, err := ioutil.ReadAll(rc)
+if err != nil {
+ log.Fatal(err)
+}
+```
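+
+Writing an object follows the same pattern (a short sketch; it reuses the
+`client` and object names from the read example above):
+
+```go
+// Write data to object1 in bucket.
+wc := client.Bucket("bucket").Object("object1").NewWriter(ctx)
+if _, err := wc.Write([]byte("hello world")); err != nil {
+    log.Fatal(err)
+}
+// Close flushes the buffered data and completes the upload.
+if err := wc.Close(); err != nil {
+    log.Fatal(err)
+}
+```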
+
+## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)
+
+- [About Cloud Pubsub][cloud-pubsub]
+- [API documentation][cloud-pubsub-docs]
+- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)
+
+### Example Usage
+
+First create a `pubsub.Client` to use throughout your application:
+
+```go
+client, err := pubsub.NewClient(ctx, "project-id")
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+```go
+// Publish "hello world" on topic1.
+topic := client.Topic("topic1")
+msgIDs, err := topic.Publish(ctx, &pubsub.Message{
+ Data: []byte("hello world"),
+})
+if err != nil {
+ log.Fatal(err)
+}
+
+// Create an iterator to pull messages via subscription1.
+it, err := client.Subscription("subscription1").Pull(ctx)
+if err != nil {
+ log.Println(err)
+}
+defer it.Stop()
+
+// Consume N messages from the iterator.
+for i := 0; i < N; i++ {
+ msg, err := it.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ log.Fatalf("Failed to retrieve message: %v", err)
+ }
+
+ fmt.Printf("Message %d: %s\n", i, msg.Data)
+ msg.Done(true) // Acknowledge that we've consumed the message.
+}
+```
+
+## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)
+
+- [About Cloud BigQuery][cloud-bigquery]
+- [API documentation][cloud-bigquery-docs]
+- [Go client documentation][cloud-bigquery-ref]
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)
+
+### Example Usage
+
+First create a `bigquery.Client` to use throughout your application:
+```go
+c, err := bigquery.NewClient(ctx, "my-project-ID")
+if err != nil {
+ // TODO: Handle error.
+}
+```
+Then use that client to interact with the API:
+```go
+// Construct a query.
+q := c.Query(`
+ SELECT year, SUM(number)
+ FROM [bigquery-public-data:usa_names.usa_1910_2013]
+ WHERE name = "William"
+ GROUP BY year
+ ORDER BY year
+`)
+// Execute the query.
+it, err := q.Read(ctx)
+if err != nil {
+ // TODO: Handle error.
+}
+// Iterate through the results.
+for {
+ var values bigquery.ValueList
+ err := it.Next(&values)
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Println(values)
+}
+```
+
+
+## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging)
+
+- [About Stackdriver Logging][cloud-logging]
+- [API documentation][cloud-logging-docs]
+- [Go client documentation][cloud-logging-ref]
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)
+
+### Example Usage
+
+First create a `logging.Client` to use throughout your application:
+
+```go
+ctx := context.Background()
+client, err := logging.NewClient(ctx, "my-project")
+if err != nil {
+ // TODO: Handle error.
+}
+```
+Usually, you'll want to add log entries to a buffer to be periodically flushed
+(automatically and asynchronously) to the Stackdriver Logging service.
+```go
+logger := client.Logger("my-log")
+logger.Log(logging.Entry{Payload: "something happened!"})
+```
+Close your client before your program exits, to flush any buffered log entries.
+```go
+err = client.Close()
+if err != nil {
+ // TODO: Handle error.
+}
+```
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
+document for details. We're using Gerrit for our code reviews. Please don't open pull
+requests against this repo; new pull requests will be automatically closed.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+
+[cloud-datastore]: https://cloud.google.com/datastore/
+[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
+[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
+[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
+
+[cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
+[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
+
+[cloud-storage]: https://cloud.google.com/storage/
+[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
+[cloud-storage-docs]: https://cloud.google.com/storage/docs
+[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
+
+[cloud-bigtable]: https://cloud.google.com/bigtable/
+[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable
+
+[cloud-bigquery]: https://cloud.google.com/bigquery/
+[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
+[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery
+
+[cloud-logging]: https://cloud.google.com/logging/
+[cloud-logging-docs]: https://cloud.google.com/logging/docs
+[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging
+
+[cloud-vision]: https://cloud.google.com/vision/
+[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision
+
+[cloud-language]: https://cloud.google.com/natural-language
+[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1beta1
+
+[cloud-speech]: https://cloud.google.com/speech
+[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1
+
+[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
new file mode 100644
index 0000000..f9d2bef
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -0,0 +1,438 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata // import "cloud.google.com/go/compute/metadata"
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+
+ "cloud.google.com/go/internal"
+)
+
+const (
+ // metadataIP is the documented metadata server IP address.
+ metadataIP = "169.254.169.254"
+
+ // metadataHostEnv is the environment variable specifying the
+ // GCE metadata hostname. If empty, the default value of
+ // metadataIP ("169.254.169.254") is used instead.
+ // This variable name is not defined by any spec, as far as
+ // I know; it was made up for the Go package.
+ metadataHostEnv = "GCE_METADATA_HOST"
+)
+
+type cachedValue struct {
+ k string
+ trim bool
+ mu sync.Mutex
+ v string
+}
+
+var (
+ projID = &cachedValue{k: "project/project-id", trim: true}
+ projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+ instID = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+ metaClient = &http.Client{
+ Transport: &internal.Transport{
+ Base: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 2 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ ResponseHeaderTimeout: 2 * time.Second,
+ },
+ },
+ }
+ subscribeClient = &http.Client{
+ Transport: &internal.Transport{
+ Base: &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 2 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ },
+ },
+ }
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+ return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+ val, _, err := getETag(metaClient, suffix)
+ return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag using the provided client. This func is otherwise equivalent to Get.
+func getETag(client *http.Client, suffix string) (value, etag string, err error) {
+ // Using a fixed IP makes it very difficult to spoof the metadata service in
+ // a container, which is an important use-case for local testing of cloud
+ // deployments. To enable spoofing of the metadata service, the environment
+ // variable GCE_METADATA_HOST is first inspected to decide where metadata
+ // requests shall go.
+ host := os.Getenv(metadataHostEnv)
+ if host == "" {
+ // Using 169.254.169.254 instead of "metadata" here because Go
+ // binaries built with the "netgo" tag and without cgo won't
+ // know the search suffix for "metadata" is
+ // ".google.internal", and this IP address is documented as
+ // being stable anyway.
+ host = metadataIP
+ }
+ url := "http://" + host + "/computeMetadata/v1/" + suffix
+ req, _ := http.NewRequest("GET", url, nil)
+ req.Header.Set("Metadata-Flavor", "Google")
+ res, err := client.Do(req)
+ if err != nil {
+ return "", "", err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusNotFound {
+ return "", "", NotDefinedError(suffix)
+ }
+ if res.StatusCode != 200 {
+ return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+ }
+ all, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", "", err
+ }
+ return string(all), res.Header.Get("Etag"), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+ s, err = Get(suffix)
+ s = strings.TrimSpace(s)
+ return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+ defer c.mu.Unlock()
+ c.mu.Lock()
+ if c.v != "" {
+ return c.v, nil
+ }
+ if c.trim {
+ v, err = getTrimmed(c.k)
+ } else {
+ v, err = Get(c.k)
+ }
+ if err == nil {
+ c.v = v
+ }
+ return
+}
+
+var (
+ onGCEOnce sync.Once
+ onGCE bool
+)
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+ onGCEOnce.Do(initOnGCE)
+ return onGCE
+}
+
+func initOnGCE() {
+ onGCE = testOnGCE()
+}
+
+func testOnGCE() bool {
+ // The user explicitly said they're on GCE, so trust them.
+ if os.Getenv(metadataHostEnv) != "" {
+ return true
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ resc := make(chan bool, 2)
+
+ // Try two strategies in parallel.
+ // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
+ go func() {
+ res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
+ if err != nil {
+ resc <- false
+ return
+ }
+ defer res.Body.Close()
+ resc <- res.Header.Get("Metadata-Flavor") == "Google"
+ }()
+
+ go func() {
+ addrs, err := net.LookupHost("metadata.google.internal")
+ if err != nil || len(addrs) == 0 {
+ resc <- false
+ return
+ }
+ resc <- strsContains(addrs, metadataIP)
+ }()
+
+ tryHarder := systemInfoSuggestsGCE()
+ if tryHarder {
+ res := <-resc
+ if res {
+ // The first strategy succeeded, so let's use it.
+ return true
+ }
+ // Wait for either the DNS or metadata server probe to
+ // contradict the other one and say we are running on
+ // GCE. Give it a lot of time to do so, since the system
+ // info already suggests we're running on a GCE BIOS.
+ timer := time.NewTimer(5 * time.Second)
+ defer timer.Stop()
+ select {
+ case res = <-resc:
+ return res
+ case <-timer.C:
+ // Too slow. Who knows what this system is.
+ return false
+ }
+ }
+
+ // There's no hint from the system info that we're running on
+ // GCE, so use the first probe's result as truth, whether it's
+ // true or false. The goal here is to optimize for speed for
+ // users who are NOT running on GCE. We can't assume that
+ // either a DNS lookup or an HTTP request to a blackholed IP
+ // address is fast. Worst case this should return when the
+ // metaClient's Transport.ResponseHeaderTimeout or
+ // Transport.Dial.Timeout fires (in two seconds).
+ return <-resc
+}
+
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+func systemInfoSuggestsGCE() bool {
+ if runtime.GOOS != "linux" {
+ // We don't have any non-Linux clues available, at least yet.
+ return false
+ }
+ slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+ name := strings.TrimSpace(string(slurp))
+ return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+ const failedSubscribeSleep = time.Second * 5
+
+ // First check to see if the metadata value exists at all.
+ val, lastETag, err := getETag(subscribeClient, suffix)
+ if err != nil {
+ return err
+ }
+
+ if err := fn(val, true); err != nil {
+ return err
+ }
+
+ ok := true
+ if strings.ContainsRune(suffix, '?') {
+ suffix += "&wait_for_change=true&last_etag="
+ } else {
+ suffix += "?wait_for_change=true&last_etag="
+ }
+ for {
+ val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+ if err != nil {
+ if _, deleted := err.(NotDefinedError); !deleted {
+ time.Sleep(failedSubscribeSleep)
+ continue // Retry on other errors.
+ }
+ ok = false
+ }
+ lastETag = etag
+
+ if err := fn(val, ok); err != nil || !ok {
+ return err
+ }
+ }
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+ return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+ var s []string
+ j, err := Get("instance/tags")
+ if err != nil {
+ return nil, err
+ }
+ if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+ return instID.get()
+}
+
+// InstanceName returns the current VM's instance name.
+func InstanceName() (string, error) {
+ host, err := Hostname()
+ if err != nil {
+ return "", err
+ }
+ return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+ zone, err := getTrimmed("instance/zone")
+ // zone is of the form "projects/<projNum>/zones/<zoneName>".
+ if err != nil {
+ return "", err
+ }
+ return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+ j, err := Get(suffix)
+ if err != nil {
+ return nil, err
+ }
+ s := strings.Split(strings.TrimSpace(j), "\n")
+ for i := range s {
+ s[i] = strings.TrimSpace(s[i])
+ }
+ return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+ return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+ return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+ if serviceAccount == "" {
+ serviceAccount = "default"
+ }
+ return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
+
+func strsContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go
new file mode 100644
index 0000000..8e0c8f8
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/cloud.go
@@ -0,0 +1,64 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides support for the cloud packages.
+//
+// Users should not import this package directly.
+package internal
+
+import (
+ "fmt"
+ "net/http"
+)
+
+const userAgent = "gcloud-golang/0.1"
+
+// Transport is an http.RoundTripper that appends Google Cloud client's
+// user-agent to the original request's user-agent header.
+type Transport struct {
+ // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
+ // Do User-Agent some other way.
+
+ // Base is the actual http.RoundTripper
+ // requests will use. It must not be nil.
+ Base http.RoundTripper
+}
+
+// RoundTrip appends a user-agent to the existing user-agent
+// header and delegates the request to the base http.RoundTripper.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req = cloneRequest(req)
+ ua := req.Header.Get("User-Agent")
+ if ua == "" {
+ ua = userAgent
+ } else {
+ ua = fmt.Sprintf("%s %s", ua, userAgent)
+ }
+ req.Header.Set("User-Agent", ua)
+ return t.Base.RoundTrip(req)
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header)
+ for k, s := range r.Header {
+ r2.Header[k] = s
+ }
+ return r2
+}
diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go
new file mode 100644
index 0000000..79995be
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/retry.go
@@ -0,0 +1,55 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "fmt"
+ "time"
+
+ gax "github.com/googleapis/gax-go"
+
+ "golang.org/x/net/context"
+)
+
+// Retry calls the supplied function f repeatedly according to the provided
+// backoff parameters. It returns when one of the following occurs:
+// When f's first return value is true, Retry immediately returns with f's second
+// return value.
+// When the provided context is done, Retry returns with ctx.Err().
+func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
+ return retry(ctx, bo, f, gax.Sleep)
+}
+
+func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
+ sleep func(context.Context, time.Duration) error) error {
+ var lastErr error
+ for {
+ stop, err := f()
+ if stop {
+ return err
+ }
+ // Remember the last "real" error from f.
+ if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
+ lastErr = err
+ }
+ p := bo.Pause()
+ if cerr := sleep(ctx, p); cerr != nil {
+ if lastErr != nil {
+ return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
+ }
+ return cerr
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/logging/apiv2/README.md b/vendor/cloud.google.com/go/logging/apiv2/README.md
new file mode 100644
index 0000000..d2d9a17
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/README.md
@@ -0,0 +1,11 @@
+Auto-generated logging v2 clients
+=================================
+
+This package includes auto-generated clients for the logging v2 API.
+
+Use the handwritten logging client (in the parent directory,
+cloud.google.com/go/logging) in preference to this.
+
+This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME.
+
+
diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go
new file mode 100644
index 0000000..d3091be
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go
@@ -0,0 +1,300 @@
+// Copyright 2016, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package logging
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "strings"
+ "time"
+
+ gax "github.com/googleapis/gax-go"
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+var (
+ configParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
+ configSinkPathTemplate = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}")
+)
+
+// ConfigCallOptions contains the retry settings for each method of ConfigClient.
+type ConfigCallOptions struct {
+ ListSinks []gax.CallOption
+ GetSink []gax.CallOption
+ CreateSink []gax.CallOption
+ UpdateSink []gax.CallOption
+ DeleteSink []gax.CallOption
+}
+
+func defaultConfigClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("logging.googleapis.com:443"),
+ option.WithScopes(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read",
+ "https://www.googleapis.com/auth/logging.write",
+ ),
+ }
+}
+
+func defaultConfigCallOptions() *ConfigCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 1000 * time.Millisecond,
+ Multiplier: 1.2,
+ })
+ }),
+ },
+ }
+ return &ConfigCallOptions{
+ ListSinks: retry[[2]string{"default", "idempotent"}],
+ GetSink: retry[[2]string{"default", "idempotent"}],
+ CreateSink: retry[[2]string{"default", "non_idempotent"}],
+ UpdateSink: retry[[2]string{"default", "non_idempotent"}],
+ DeleteSink: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// ConfigClient is a client for interacting with Stackdriver Logging API.
+type ConfigClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ configClient loggingpb.ConfigServiceV2Client
+
+ // The call options for this service.
+ CallOptions *ConfigCallOptions
+
+ // The metadata to be sent with each request.
+ metadata metadata.MD
+}
+
+// NewConfigClient creates a new config service v2 client.
+//
+// Service for configuring sinks used to export log entries outside of
+// Stackdriver Logging.
+func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &ConfigClient{
+ conn: conn,
+ CallOptions: defaultConfigCallOptions(),
+
+ configClient: loggingpb.NewConfigServiceV2Client(conn),
+ }
+ c.SetGoogleClientInfo("gax", gax.Version)
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *ConfigClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *ConfigClient) Close() error {
+ return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *ConfigClient) SetGoogleClientInfo(name, version string) {
+ goVersion := strings.Replace(runtime.Version(), " ", "_", -1)
+ v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion)
+ c.metadata = metadata.Pairs("x-goog-api-client", v)
+}
+
+// ConfigParentPath returns the path for the parent resource.
+func ConfigParentPath(project string) string {
+ path, err := configParentPathTemplate.Render(map[string]string{
+ "project": project,
+ })
+ if err != nil {
+ panic(err)
+ }
+ return path
+}
+
+// ConfigSinkPath returns the path for the sink resource.
+func ConfigSinkPath(project, sink string) string {
+ path, err := configSinkPathTemplate.Render(map[string]string{
+ "project": project,
+ "sink": sink,
+ })
+ if err != nil {
+ panic(err)
+ }
+ return path
+}
+
+// ListSinks lists sinks.
+func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) *LogSinkIterator {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ it := &LogSinkIterator{}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) {
+ var resp *loggingpb.ListSinksResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.configClient.ListSinks(ctx, req)
+ return err
+ }, c.CallOptions.ListSinks...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Sinks, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ return it
+}
+
+// GetSink gets a sink.
+func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ var resp *loggingpb.LogSink
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.configClient.GetSink(ctx, req)
+ return err
+ }, c.CallOptions.GetSink...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateSink creates a sink.
+func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ var resp *loggingpb.LogSink
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.configClient.CreateSink(ctx, req)
+ return err
+ }, c.CallOptions.CreateSink...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateSink updates or creates a sink.
+func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ var resp *loggingpb.LogSink
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.configClient.UpdateSink(ctx, req)
+ return err
+ }, c.CallOptions.UpdateSink...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteSink deletes a sink.
+func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) error {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ _, err = c.configClient.DeleteSink(ctx, req)
+ return err
+ }, c.CallOptions.DeleteSink...)
+ return err
+}
+
+// LogSinkIterator manages a stream of *loggingpb.LogSink.
+type LogSinkIterator struct {
+ items []*loggingpb.LogSink
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogSink, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *LogSinkIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) {
+ var item *loggingpb.LogSink
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *LogSinkIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *LogSinkIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go
new file mode 100644
index 0000000..3a92278
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go
@@ -0,0 +1,26 @@
+// Copyright 2016, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+// Package logging is an experimental, auto-generated package for the
+// logging API.
+//
+// The Stackdriver Logging API lets you write log entries and manage your
+// logs, log sinks and logs-based metrics.
+//
+// Use the client at cloud.google.com/go/logging in preference to this.
+package logging // import "cloud.google.com/go/logging/apiv2"
+
+const gapicNameVersion = "gapic/0.1.0"
diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go
new file mode 100644
index 0000000..321b1e2
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go
@@ -0,0 +1,359 @@
+// Copyright 2016, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package logging
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "strings"
+ "time"
+
+ gax "github.com/googleapis/gax-go"
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+var (
+ loggingParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
+ loggingLogPathTemplate = gax.MustCompilePathTemplate("projects/{project}/logs/{log}")
+)
+
+// CallOptions contains the retry settings for each method of Client.
+type CallOptions struct {
+ DeleteLog []gax.CallOption
+ WriteLogEntries []gax.CallOption
+ ListLogEntries []gax.CallOption
+ ListMonitoredResourceDescriptors []gax.CallOption
+}
+
+func defaultClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("logging.googleapis.com:443"),
+ option.WithScopes(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read",
+ "https://www.googleapis.com/auth/logging.write",
+ ),
+ }
+}
+
+func defaultCallOptions() *CallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 1000 * time.Millisecond,
+ Multiplier: 1.2,
+ })
+ }),
+ },
+ {"list", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 1000 * time.Millisecond,
+ Multiplier: 1.2,
+ })
+ }),
+ },
+ }
+ return &CallOptions{
+ DeleteLog: retry[[2]string{"default", "idempotent"}],
+ WriteLogEntries: retry[[2]string{"default", "non_idempotent"}],
+ ListLogEntries: retry[[2]string{"list", "idempotent"}],
+ ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}],
+ }
+}
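+
+// Illustrative sketch (not part of the generated client): after constructing a
+// Client, a caller could replace the per-method retry settings by assigning a
+// new gax.CallOption slice. The backoff values below are arbitrary examples.
+//
+//    c, err := NewClient(ctx)
+//    if err != nil {
+//        // TODO: Handle error.
+//    }
+//    c.CallOptions.DeleteLog = []gax.CallOption{
+//        gax.WithRetry(func() gax.Retryer {
+//            return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//                Initial:    200 * time.Millisecond,
+//                Max:        5 * time.Second,
+//                Multiplier: 1.5,
+//            })
+//        }),
+//    }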
+
+// Client is a client for interacting with Stackdriver Logging API.
+type Client struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ client loggingpb.LoggingServiceV2Client
+
+ // The call options for this service.
+ CallOptions *CallOptions
+
+ // The metadata to be sent with each request.
+ metadata metadata.MD
+}
+
+// NewClient creates a new logging service v2 client.
+//
+// Service for ingesting and querying logs.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &Client{
+ conn: conn,
+ CallOptions: defaultCallOptions(),
+
+ client: loggingpb.NewLoggingServiceV2Client(conn),
+ }
+ c.SetGoogleClientInfo("gax", gax.Version)
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *Client) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *Client) Close() error {
+ return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *Client) SetGoogleClientInfo(name, version string) {
+ goVersion := strings.Replace(runtime.Version(), " ", "_", -1)
+ v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion)
+ c.metadata = metadata.Pairs("x-goog-api-client", v)
+}
+
+// LoggingParentPath returns the path for the parent resource.
+func LoggingParentPath(project string) string {
+ path, err := loggingParentPathTemplate.Render(map[string]string{
+ "project": project,
+ })
+ if err != nil {
+ panic(err)
+ }
+ return path
+}
+
+// LoggingLogPath returns the path for the log resource.
+func LoggingLogPath(project, log string) string {
+ path, err := loggingLogPathTemplate.Render(map[string]string{
+ "project": project,
+ "log": log,
+ })
+ if err != nil {
+ panic(err)
+ }
+ return path
+}
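+
+// For example (illustrative values only):
+//
+//    LoggingLogPath("my-project", "my-log")
+//    // returns "projects/my-project/logs/my-log"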
+
+// DeleteLog deletes all the log entries in a log.
+// The log reappears if it receives new entries.
+func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) error {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ _, err = c.client.DeleteLog(ctx, req)
+ return err
+ }, c.CallOptions.DeleteLog...)
+ return err
+}
+
+// WriteLogEntries writes log entries to Stackdriver Logging. All log entries are
+// written by this method.
+func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ var resp *loggingpb.WriteLogEntriesResponse
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.client.WriteLogEntries(ctx, req)
+ return err
+ }, c.CallOptions.WriteLogEntries...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListLogEntries lists log entries. Use this method to retrieve log entries from Cloud
+// Logging. For ways to export log entries, see
+// [Exporting Logs](/logging/docs/export).
+func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) *LogEntryIterator {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ it := &LogEntryIterator{}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) {
+ var resp *loggingpb.ListLogEntriesResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.client.ListLogEntries(ctx, req)
+ return err
+ }, c.CallOptions.ListLogEntries...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Entries, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ return it
+}
+
+// ListMonitoredResourceDescriptors lists the monitored resource descriptors used by Stackdriver Logging.
+func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ it := &MonitoredResourceDescriptorIterator{}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
+ var resp *loggingpb.ListMonitoredResourceDescriptorsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req)
+ return err
+ }, c.CallOptions.ListMonitoredResourceDescriptors...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.ResourceDescriptors, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ return it
+}
+
+// LogEntryIterator manages a stream of *loggingpb.LogEntry.
+type LogEntryIterator struct {
+ items []*loggingpb.LogEntry
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogEntry, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *LogEntryIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) {
+ var item *loggingpb.LogEntry
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *LogEntryIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *LogEntryIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
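+
+// Illustrative sketch (not part of the generated client): draining a
+// LogEntryIterator returned by ListLogEntries. The request fields are left
+// for the caller to populate.
+//
+//    it := c.ListLogEntries(ctx, &loggingpb.ListLogEntriesRequest{
+//        // TODO: Fill in the request fields (project, filter, ...).
+//    })
+//    for {
+//        entry, err := it.Next()
+//        if err == iterator.Done {
+//            break
+//        }
+//        if err != nil {
+//            // TODO: Handle error.
+//        }
+//        _ = entry // TODO: Use entry.
+//    }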
+
+// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor.
+type MonitoredResourceDescriptorIterator struct {
+ items []*monitoredrespb.MonitoredResourceDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ var item *monitoredrespb.MonitoredResourceDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MonitoredResourceDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
new file mode 100644
index 0000000..2345978
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go
@@ -0,0 +1,299 @@
+// Copyright 2016, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package logging
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "strings"
+ "time"
+
+ gax "github.com/googleapis/gax-go"
+ "golang.org/x/net/context"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ loggingpb "google.golang.org/genproto/googleapis/logging/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+var (
+ metricsParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
+ metricsMetricPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}")
+)
+
+// MetricsCallOptions contains the retry settings for each method of MetricsClient.
+type MetricsCallOptions struct {
+ ListLogMetrics []gax.CallOption
+ GetLogMetric []gax.CallOption
+ CreateLogMetric []gax.CallOption
+ UpdateLogMetric []gax.CallOption
+ DeleteLogMetric []gax.CallOption
+}
+
+func defaultMetricsClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("logging.googleapis.com:443"),
+ option.WithScopes(
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/logging.admin",
+ "https://www.googleapis.com/auth/logging.read",
+ "https://www.googleapis.com/auth/logging.write",
+ ),
+ }
+}
+
+func defaultMetricsCallOptions() *MetricsCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 1000 * time.Millisecond,
+ Multiplier: 1.2,
+ })
+ }),
+ },
+ }
+ return &MetricsCallOptions{
+ ListLogMetrics: retry[[2]string{"default", "idempotent"}],
+ GetLogMetric: retry[[2]string{"default", "idempotent"}],
+ CreateLogMetric: retry[[2]string{"default", "non_idempotent"}],
+ UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}],
+ DeleteLogMetric: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// MetricsClient is a client for interacting with Stackdriver Logging API.
+type MetricsClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ metricsClient loggingpb.MetricsServiceV2Client
+
+ // The call options for this service.
+ CallOptions *MetricsCallOptions
+
+ // The metadata to be sent with each request.
+ metadata metadata.MD
+}
+
+// NewMetricsClient creates a new metrics service v2 client.
+//
+// Service for configuring logs-based metrics.
+func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &MetricsClient{
+ conn: conn,
+ CallOptions: defaultMetricsCallOptions(),
+
+ metricsClient: loggingpb.NewMetricsServiceV2Client(conn),
+ }
+ c.SetGoogleClientInfo("gax", gax.Version)
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *MetricsClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *MetricsClient) Close() error {
+ return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *MetricsClient) SetGoogleClientInfo(name, version string) {
+ goVersion := strings.Replace(runtime.Version(), " ", "_", -1)
+ v := fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, goVersion)
+ c.metadata = metadata.Pairs("x-goog-api-client", v)
+}
+
+// MetricsParentPath returns the path for the parent resource.
+func MetricsParentPath(project string) string {
+ path, err := metricsParentPathTemplate.Render(map[string]string{
+ "project": project,
+ })
+ if err != nil {
+ panic(err)
+ }
+ return path
+}
+
+// MetricsMetricPath returns the path for the metric resource.
+func MetricsMetricPath(project, metric string) string {
+ path, err := metricsMetricPathTemplate.Render(map[string]string{
+ "project": project,
+ "metric": metric,
+ })
+ if err != nil {
+ panic(err)
+ }
+ return path
+}
+
+// ListLogMetrics lists logs-based metrics.
+func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) *LogMetricIterator {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ it := &LogMetricIterator{}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) {
+ var resp *loggingpb.ListLogMetricsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.metricsClient.ListLogMetrics(ctx, req)
+ return err
+ }, c.CallOptions.ListLogMetrics...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Metrics, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ return it
+}
+
+// GetLogMetric gets a logs-based metric.
+func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ var resp *loggingpb.LogMetric
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.metricsClient.GetLogMetric(ctx, req)
+ return err
+ }, c.CallOptions.GetLogMetric...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// CreateLogMetric creates a logs-based metric.
+func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ var resp *loggingpb.LogMetric
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.metricsClient.CreateLogMetric(ctx, req)
+ return err
+ }, c.CallOptions.CreateLogMetric...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateLogMetric creates or updates a logs-based metric.
+func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ var resp *loggingpb.LogMetric
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ resp, err = c.metricsClient.UpdateLogMetric(ctx, req)
+ return err
+ }, c.CallOptions.UpdateLogMetric...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteLogMetric deletes a logs-based metric.
+func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) error {
+ md, _ := metadata.FromContext(ctx)
+ ctx = metadata.NewContext(ctx, metadata.Join(md, c.metadata))
+ err := gax.Invoke(ctx, func(ctx context.Context) error {
+ var err error
+ _, err = c.metricsClient.DeleteLogMetric(ctx, req)
+ return err
+ }, c.CallOptions.DeleteLogMetric...)
+ return err
+}
+
+// LogMetricIterator manages a stream of *loggingpb.LogMetric.
+type LogMetricIterator struct {
+ items []*loggingpb.LogMetric
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogMetric, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *LogMetricIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) {
+ var item *loggingpb.LogMetric
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *LogMetricIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *LogMetricIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/logging/doc.go b/vendor/cloud.google.com/go/logging/doc.go
new file mode 100644
index 0000000..6da3adf
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/doc.go
@@ -0,0 +1,89 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package logging contains a Stackdriver Logging client suitable for writing logs.
+For reading logs, and working with sinks, metrics and monitored resources,
+see package cloud.google.com/go/logging/logadmin.
+
+This client uses Logging API v2.
+See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API.
+
+This package is experimental and subject to API changes.
+
+
+Creating a Client
+
+Use a Client to interact with the Stackdriver Logging API.
+
+ // Create a Client
+ ctx := context.Background()
+ client, err := logging.NewClient(ctx, "my-project")
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+
+Basic Usage
+
+For most use cases, you'll want to add log entries to a buffer to be periodically
+flushed (automatically and asynchronously) to the Stackdriver Logging service.
+
+ // Initialize a logger
+ lg := client.Logger("my-log")
+
+ // Add entry to log buffer
+ lg.Log(logging.Entry{Payload: "something happened!"})
+
+
+Closing your Client
+
+You should call Client.Close before your program exits to flush any buffered log entries to the Stackdriver Logging service.
+
+ // Close the client when finished.
+ err = client.Close()
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+
+Synchronous Logging
+
+For critical errors, you may want to send your log entries immediately.
+LogSync is slow and will block until the log entry has been sent, so it is
+not recommended for basic use.
+
+ lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"})
+
+
+The Standard Logger Interface
+
+You may want to use a standard log.Logger in your program.
+
+ // stdlg implements log.Logger
+ stdlg := lg.StandardLogger(logging.Info)
+ stdlg.Println("some info")
+
+
+Log Levels
+
+An Entry may have one of a number of severity levels associated with it.
+
+ logging.Entry{
+ Payload: "something terrible happened!",
+ Severity: logging.Critical,
+ }
+
+*/
+package logging // import "cloud.google.com/go/logging"
diff --git a/vendor/cloud.google.com/go/logging/internal/common.go b/vendor/cloud.google.com/go/logging/internal/common.go
new file mode 100644
index 0000000..7d8ece0
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/internal/common.go
@@ -0,0 +1,30 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ ProdAddr = "logging.googleapis.com:443"
+ Version = "0.2.0"
+)
+
+func LogPath(parent, logID string) string {
+ logID = strings.Replace(logID, "/", "%2F", -1)
+ return fmt.Sprintf("%s/logs/%s", parent, logID)
+}
diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go
new file mode 100644
index 0000000..8506800
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/logging.go
@@ -0,0 +1,674 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// API/gRPC features intentionally missing from this client:
+// - You cannot have the server pick the time of the entry. This client
+// always sends a time.
+// - There is no way to provide a protocol buffer payload.
+// - No support for the "partial success" feature when writing log entries.
+
+// TODO(jba): test whether forward-slash characters in the log ID must be URL-encoded.
+// These features are missing now, but will likely be added:
+// - There is no way to specify CallOptions.
+
+package logging
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "math"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ vkit "cloud.google.com/go/logging/apiv2"
+ "cloud.google.com/go/logging/internal"
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ structpb "github.com/golang/protobuf/ptypes/struct"
+ tspb "github.com/golang/protobuf/ptypes/timestamp"
+ "golang.org/x/net/context"
+ "google.golang.org/api/option"
+ "google.golang.org/api/support/bundler"
+ mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
+ logtypepb "google.golang.org/genproto/googleapis/logging/type"
+ logpb "google.golang.org/genproto/googleapis/logging/v2"
+)
+
+const (
+ // Scope for reading from the logging service.
+ ReadScope = "https://www.googleapis.com/auth/logging.read"
+
+ // Scope for writing to the logging service.
+ WriteScope = "https://www.googleapis.com/auth/logging.write"
+
+ // Scope for administrative actions on the logging service.
+ AdminScope = "https://www.googleapis.com/auth/logging.admin"
+)
+
+const (
+ // defaultErrorCapacity is the capacity of the channel used to deliver
+ // errors to the OnError function.
+ defaultErrorCapacity = 10
+
+ // DefaultDelayThreshold is the default value for the DelayThreshold LoggerOption.
+ DefaultDelayThreshold = time.Second
+
+ // DefaultEntryCountThreshold is the default value for the EntryCountThreshold LoggerOption.
+ DefaultEntryCountThreshold = 1000
+
+ // DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption.
+ DefaultEntryByteThreshold = 1 << 20 // 1MiB
+
+ // DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption.
+ DefaultBufferedByteLimit = 1 << 30 // 1GiB
+)
+
+// For testing:
+var now = time.Now
+
+// ErrOverflow signals that the number of buffered entries for a Logger
+// exceeds its BufferLimit.
+var ErrOverflow = errors.New("logging: log entry overflowed buffer limits")
+
+// Client is a Logging client. A Client is associated with a single Cloud project.
+type Client struct {
+ client *vkit.Client // client for the logging service
+ projectID string
+ errc chan error // should be buffered to minimize dropped errors
+ donec chan struct{} // closed on Client.Close to close Logger bundlers
+ loggers sync.WaitGroup // so we can wait for loggers to close
+ closed bool
+
+ // OnError is called when an error occurs in a call to Log or Flush. The
+ // error may be due to an invalid Entry, an overflow because BufferLimit
+ // was reached (in which case the error will be ErrOverflow) or an error
+ // communicating with the logging service. OnError is called with errors
+ // from all Loggers. It is never called concurrently. OnError is expected
+ // to return quickly; if errors occur while OnError is running, some may
+ // not be reported. The default behavior is to call log.Printf.
+ //
+ // This field should be set only once, before any method of Client is called.
+ OnError func(err error)
+}
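+
+// Illustrative sketch: installing a custom OnError handler right after
+// NewClient and before any other method is called, as the field documentation
+// above requires. The project ID is a placeholder.
+//
+//    client, err := NewClient(ctx, "my-project")
+//    if err != nil {
+//        // TODO: Handle error.
+//    }
+//    client.OnError = func(e error) {
+//        log.Printf("logging error: %v", e)
+//    }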
+
+// NewClient returns a new logging client associated with the provided project ID.
+//
+// By default NewClient uses WriteScope. To use a different scope, call
+// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes).
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
+ // Check for '/' in project ID to reserve the ability to support various owning resources,
+ // in the form "{Collection}/{Name}", for instance "organizations/my-org".
+ if strings.ContainsRune(projectID, '/') {
+ return nil, errors.New("logging: project ID contains '/'")
+ }
+ opts = append([]option.ClientOption{
+ option.WithEndpoint(internal.ProdAddr),
+ option.WithScopes(WriteScope),
+ }, opts...)
+ c, err := vkit.NewClient(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+ c.SetGoogleClientInfo("logging", internal.Version)
+ client := &Client{
+ client: c,
+ projectID: projectID,
+ errc: make(chan error, defaultErrorCapacity), // create a small buffer for errors
+ donec: make(chan struct{}),
+ OnError: func(e error) { log.Printf("logging client: %v", e) },
+ }
+	// Deliver errors to the user's OnError function from a single goroutine,
+	// one at a time, so the function never has to handle concurrent calls.
+ go func() {
+ for err := range client.errc {
+ // This reference to OnError is memory-safe if the user sets OnError before
+ // calling any client methods. The reference happens before the first read from
+ // client.errc, which happens before the first write to client.errc, which
+ // happens before any call, which happens before the user sets OnError.
+ if fn := client.OnError; fn != nil {
+ fn(err)
+ } else {
+ log.Printf("logging (project ID %q): %v", projectID, err)
+ }
+ }
+ }()
+ return client, nil
+}
+
+// parent returns the string used in many RPCs to denote the parent resource of the log.
+func (c *Client) parent() string {
+ return "projects/" + c.projectID
+}
+
+var unixZeroTimestamp *tspb.Timestamp
+
+func init() {
+ var err error
+ unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0))
+ if err != nil {
+ panic(err)
+ }
+}
+
+// Ping reports whether the client's connection to the logging service and the
+// authentication configuration are valid. To accomplish this, Ping writes a
+// log entry "ping" to a log named "ping".
+func (c *Client) Ping(ctx context.Context) error {
+ ent := &logpb.LogEntry{
+ Payload: &logpb.LogEntry_TextPayload{"ping"},
+ Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both
+ InsertId: "ping", // necessary for the service to dedup these entries.
+ }
+ _, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
+ LogName: internal.LogPath(c.parent(), "ping"),
+ Resource: &mrpb.MonitoredResource{Type: "global"},
+ Entries: []*logpb.LogEntry{ent},
+ })
+ return err
+}
+
+// A Logger is used to write log messages to a single log. It can be configured
+// with a log ID, common monitored resource, and a set of common labels.
+type Logger struct {
+ client *Client
+ logName string // "projects/{projectID}/logs/{logID}"
+ stdLoggers map[Severity]*log.Logger
+ bundler *bundler.Bundler
+
+ // Options
+ commonResource *mrpb.MonitoredResource
+ commonLabels map[string]string
+}
+
+// A LoggerOption is a configuration option for a Logger.
+type LoggerOption interface {
+ set(*Logger)
+}
+
+// CommonResource sets the monitored resource associated with all log entries
+// written from a Logger. If not provided, a resource of type "global" is used.
+// This value can be overridden by setting an Entry's Resource field.
+func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} }
+
+type commonResource struct{ *mrpb.MonitoredResource }
+
+func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource }
+
+// CommonLabels are labels that apply to all log entries written from a Logger,
+// so that you don't have to repeat them in each log entry's Labels field. If
+// any of the log entries contains a (key, value) with the same key that is in
+// CommonLabels, then the entry's (key, value) overrides the one in
+// CommonLabels.
+func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) }
+
+type commonLabels map[string]string
+
+func (c commonLabels) set(l *Logger) { l.commonLabels = c }
+
+// DelayThreshold is the maximum amount of time that an entry should remain
+// buffered in memory before a call to the logging service is triggered. Larger
+// values of DelayThreshold will generally result in fewer calls to the logging
+// service, while increasing the risk that log entries will be lost if the
+// process crashes.
+// The default is DefaultDelayThreshold.
+func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) }
+
+type delayThreshold time.Duration
+
+func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) }
+
+// EntryCountThreshold is the maximum number of entries that will be buffered
+// in memory before a call to the logging service is triggered. Larger values
+// will generally result in fewer calls to the logging service, while
+// increasing both memory consumption and the risk that log entries will be
+// lost if the process crashes.
+// The default is DefaultEntryCountThreshold.
+func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) }
+
+type entryCountThreshold int
+
+func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) }
+
+// EntryByteThreshold is the maximum number of bytes of entries that will be
+// buffered in memory before a call to the logging service is triggered. See
+// EntryCountThreshold for a discussion of the tradeoffs involved in setting
+// this option.
+// The default is DefaultEntryByteThreshold.
+func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) }
+
+type entryByteThreshold int
+
+func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) }
+
+// EntryByteLimit is the maximum number of bytes of entries that will be sent
+// in a single call to the logging service. This option limits the size of a
+// single RPC payload, to account for network or service issues with large
+// RPCs. If EntryByteLimit is smaller than EntryByteThreshold, the latter has
+// no effect.
+// The default is zero, meaning there is no limit.
+func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) }
+
+type entryByteLimit int
+
+func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) }
+
+// BufferedByteLimit is the maximum number of bytes that the Logger will keep
+// in memory before returning ErrOverflow. This option limits the total memory
+// consumption of the Logger (but note that each Logger has its own, separate
+// limit). It is possible to reach BufferedByteLimit even if it is larger than
+// EntryByteThreshold or EntryByteLimit, because calls triggered by the latter
+// two options may be enqueued (and hence occupying memory) while new log
+// entries are being added.
+// The default is DefaultBufferedByteLimit.
+func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) }
+
+type bufferedByteLimit int
+
+func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) }
+
+// Logger returns a Logger that will write entries with the given log ID, such as
+// "syslog". A log ID must be less than 512 characters long and may contain only
+// upper- and lower-case alphanumeric characters ([A-Za-z0-9]) and the
+// punctuation characters forward-slash, underscore, hyphen, and period.
+func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger {
+ l := &Logger{
+ client: c,
+ logName: internal.LogPath(c.parent(), logID),
+ commonResource: &mrpb.MonitoredResource{Type: "global"},
+ }
+ // TODO(jba): determine the right context for the bundle handler.
+ ctx := context.TODO()
+ l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) {
+ l.writeLogEntries(ctx, entries.([]*logpb.LogEntry))
+ })
+ l.bundler.DelayThreshold = DefaultDelayThreshold
+ l.bundler.BundleCountThreshold = DefaultEntryCountThreshold
+ l.bundler.BundleByteThreshold = DefaultEntryByteThreshold
+ l.bundler.BufferedByteLimit = DefaultBufferedByteLimit
+ for _, opt := range opts {
+ opt.set(l)
+ }
+
+ l.stdLoggers = map[Severity]*log.Logger{}
+ for s := range severityName {
+ l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0)
+ }
+ c.loggers.Add(1)
+ go func() {
+ defer c.loggers.Done()
+ <-c.donec
+ l.bundler.Close()
+ }()
+ return l
+}
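+
+// Illustrative sketch: constructing a Logger with non-default buffering
+// options, given a *Client named client. The log ID and the values shown are
+// arbitrary placeholders.
+//
+//    lg := client.Logger("my-log",
+//        DelayThreshold(5*time.Second),
+//        EntryCountThreshold(100),
+//        BufferedByteLimit(10<<20), // 10 MiB
+//    )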
+
+type severityWriter struct {
+ l *Logger
+ s Severity
+}
+
+func (w severityWriter) Write(p []byte) (n int, err error) {
+ w.l.Log(Entry{
+ Severity: w.s,
+ Payload: string(p),
+ })
+ return len(p), nil
+}
+
+// Close closes the client.
+func (c *Client) Close() error {
+ if c.closed {
+ return nil
+ }
+ close(c.donec) // close Logger bundlers
+ c.loggers.Wait() // wait for all bundlers to flush and close
+ // Now there can be no more errors.
+ close(c.errc) // terminate error goroutine
+ // Return only the first error. Since all clients share an underlying connection,
+ // Closes after the first always report a "connection is closing" error.
+ err := c.client.Close()
+ c.closed = true
+ return err
+}
+
+// Severity is the severity of the event described in a log entry. These
+// guideline severity levels are ordered, with numerically smaller levels
+// treated as less severe than numerically larger levels.
+type Severity int
+
+const (
+ // Default means the log entry has no assigned severity level.
+ Default = Severity(logtypepb.LogSeverity_DEFAULT)
+ // Debug means debug or trace information.
+ Debug = Severity(logtypepb.LogSeverity_DEBUG)
+ // Info means routine information, such as ongoing status or performance.
+ Info = Severity(logtypepb.LogSeverity_INFO)
+ // Notice means normal but significant events, such as start up, shut down, or configuration.
+ Notice = Severity(logtypepb.LogSeverity_NOTICE)
+ // Warning means events that might cause problems.
+ Warning = Severity(logtypepb.LogSeverity_WARNING)
+ // Error means events that are likely to cause problems.
+ Error = Severity(logtypepb.LogSeverity_ERROR)
+ // Critical means events that cause more severe problems or brief outages.
+ Critical = Severity(logtypepb.LogSeverity_CRITICAL)
+ // Alert means a person must take an action immediately.
+ Alert = Severity(logtypepb.LogSeverity_ALERT)
+ // Emergency means one or more systems are unusable.
+ Emergency = Severity(logtypepb.LogSeverity_EMERGENCY)
+)
+
+var severityName = map[Severity]string{
+ Default: "Default",
+ Debug: "Debug",
+ Info: "Info",
+ Notice: "Notice",
+ Warning: "Warning",
+ Error: "Error",
+ Critical: "Critical",
+ Alert: "Alert",
+ Emergency: "Emergency",
+}
+
+// String converts a severity level to a string.
+func (v Severity) String() string {
+ // same as proto.EnumName
+ s, ok := severityName[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// ParseSeverity returns the Severity whose name equals s, ignoring case. It
+// returns Default if no Severity matches.
+func ParseSeverity(s string) Severity {
+ sl := strings.ToLower(s)
+ for sev, name := range severityName {
+ if strings.ToLower(name) == sl {
+ return sev
+ }
+ }
+ return Default
+}
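+
+// For example (illustrative only):
+//
+//    ParseSeverity("ERROR")   // returns Error
+//    ParseSeverity("unknown") // returns Default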
+
+// Entry is a log entry.
+// See https://cloud.google.com/logging/docs/view/logs_index for more about entries.
+type Entry struct {
+ // Timestamp is the time of the entry. If zero, the current time is used.
+ Timestamp time.Time
+
+ // Severity is the entry's severity level.
+ // The zero value is Default.
+ Severity Severity
+
+ // Payload must be either a string or something that
+ // marshals via the encoding/json package to a JSON object
+ // (and not any other type of JSON value).
+ Payload interface{}
+
+ // Labels optionally specifies key/value labels for the log entry.
+ // The Logger.Log method takes ownership of this map. See Logger.CommonLabels
+ // for more about labels.
+ Labels map[string]string
+
+ // InsertID is a unique ID for the log entry. If you provide this field,
+ // the logging service considers other log entries in the same log with the
+ // same ID as duplicates which can be removed. If omitted, the logging
+ // service will generate a unique ID for this log entry. Note that because
+ // this client retries RPCs automatically, it is possible (though unlikely)
+ // that an Entry without an InsertID will be written more than once.
+ InsertID string
+
+ // HTTPRequest optionally specifies metadata about the HTTP request
+ // associated with this log entry, if applicable. It is optional.
+ HTTPRequest *HTTPRequest
+
+ // Operation optionally provides information about an operation associated
+ // with the log entry, if applicable.
+ Operation *logpb.LogEntryOperation
+
+ // LogName is the full log name, in the form
+ // "projects/{ProjectID}/logs/{LogID}". It is set by the client when
+ // reading entries. It is an error to set it when writing entries.
+ LogName string
+
+ // Resource is the monitored resource associated with the entry. It is set
+ // by the client when reading entries. It is an error to set it when
+ // writing entries.
+ Resource *mrpb.MonitoredResource
+}
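+
+// Illustrative sketch: the Payload field may be a plain string or any value
+// that marshals to a JSON object. The event struct and the *Logger named lg
+// are hypothetical.
+//
+//    lg.Log(Entry{Payload: "something happened"})
+//
+//    type event struct {
+//        Name  string
+//        Count int
+//    }
+//    lg.Log(Entry{
+//        Severity: Warning,
+//        Payload:  event{Name: "retry", Count: 3},
+//    })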
+
+// HTTPRequest contains an http.Request as well as additional
+// information about the request and its response.
+type HTTPRequest struct {
+ // Request is the http.Request passed to the handler.
+ Request *http.Request
+
+ // RequestSize is the size of the HTTP request message in bytes, including
+ // the request headers and the request body.
+ RequestSize int64
+
+ // Status is the response code indicating the status of the response.
+ // Examples: 200, 404.
+ Status int
+
+ // ResponseSize is the size of the HTTP response message sent back to the client, in bytes,
+ // including the response headers and the response body.
+ ResponseSize int64
+
+ // Latency is the request processing latency on the server, from the time the request was
+ // received until the response was sent.
+ Latency time.Duration
+
+ // RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the
+ // HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329".
+ RemoteIP string
+
+ // CacheHit reports whether an entity was served from cache (with or without
+ // validation).
+ CacheHit bool
+
+ // CacheValidatedWithOriginServer reports whether the response was
+ // validated with the origin server before being served from cache. This
+ // field is only meaningful if CacheHit is true.
+ CacheValidatedWithOriginServer bool
+}
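+
+// Illustrative sketch: attaching request metadata to an Entry from inside an
+// http.Handler, assuming a package-level *Logger named lg. The payload and
+// fields shown are placeholders.
+//
+//    func handle(w http.ResponseWriter, r *http.Request) {
+//        start := time.Now()
+//        // ... serve the request ...
+//        lg.Log(Entry{
+//            Payload: "served request",
+//            HTTPRequest: &HTTPRequest{
+//                Request: r,
+//                Status:  http.StatusOK,
+//                Latency: time.Since(start),
+//            },
+//        })
+//    }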
+
+func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest {
+ if r == nil {
+ return nil
+ }
+ if r.Request == nil {
+ panic("HTTPRequest must have a non-nil Request")
+ }
+ u := *r.Request.URL
+ u.Fragment = ""
+ return &logtypepb.HttpRequest{
+ RequestMethod: r.Request.Method,
+ RequestUrl: u.String(),
+ RequestSize: r.RequestSize,
+ Status: int32(r.Status),
+ ResponseSize: r.ResponseSize,
+ Latency: ptypes.DurationProto(r.Latency),
+ UserAgent: r.Request.UserAgent(),
+ RemoteIp: r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr?
+ Referer: r.Request.Referer(),
+ CacheHit: r.CacheHit,
+ CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer,
+ }
+}
+
+// toProtoStruct converts v, which must marshal into a JSON object,
+// into a Google Struct proto.
+func toProtoStruct(v interface{}) (*structpb.Struct, error) {
+ // Fast path: if v is already a *structpb.Struct, nothing to do.
+ if s, ok := v.(*structpb.Struct); ok {
+ return s, nil
+ }
+ // v is a Go struct that supports JSON marshalling. We want a Struct
+ // protobuf. Some day we may have a more direct way to get there, but right
+ // now the only way is to marshal the Go struct to JSON, unmarshal into a
+ // map, and then build the Struct proto from the map.
+ jb, err := json.Marshal(v)
+ if err != nil {
+ return nil, fmt.Errorf("logging: json.Marshal: %v", err)
+ }
+ var m map[string]interface{}
+ err = json.Unmarshal(jb, &m)
+ if err != nil {
+ return nil, fmt.Errorf("logging: json.Unmarshal: %v", err)
+ }
+ return jsonMapToProtoStruct(m), nil
+}
+
+func jsonMapToProtoStruct(m map[string]interface{}) *structpb.Struct {
+ fields := map[string]*structpb.Value{}
+ for k, v := range m {
+ fields[k] = jsonValueToStructValue(v)
+ }
+ return &structpb.Struct{Fields: fields}
+}
+
+func jsonValueToStructValue(v interface{}) *structpb.Value {
+ switch x := v.(type) {
+ case bool:
+ return &structpb.Value{Kind: &structpb.Value_BoolValue{x}}
+ case float64:
+ return &structpb.Value{Kind: &structpb.Value_NumberValue{x}}
+ case string:
+ return &structpb.Value{Kind: &structpb.Value_StringValue{x}}
+ case nil:
+ return &structpb.Value{Kind: &structpb.Value_NullValue{}}
+ case map[string]interface{}:
+ return &structpb.Value{Kind: &structpb.Value_StructValue{jsonMapToProtoStruct(x)}}
+ case []interface{}:
+ var vals []*structpb.Value
+ for _, e := range x {
+ vals = append(vals, jsonValueToStructValue(e))
+ }
+ return &structpb.Value{Kind: &structpb.Value_ListValue{&structpb.ListValue{vals}}}
+ default:
+ panic(fmt.Sprintf("bad type %T for JSON value", v))
+ }
+}
+
+// LogSync logs the Entry synchronously without any buffering. Because LogSync is slow
+// and will block, it is intended primarily for debugging or critical errors.
+// Prefer Log for most uses.
+// TODO(jba): come up with a better name (LogNow?) or eliminate.
+func (l *Logger) LogSync(ctx context.Context, e Entry) error {
+ ent, err := toLogEntry(e)
+ if err != nil {
+ return err
+ }
+ _, err = l.client.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{
+ LogName: l.logName,
+ Resource: l.commonResource,
+ Labels: l.commonLabels,
+ Entries: []*logpb.LogEntry{ent},
+ })
+ return err
+}
+
+// Log buffers the Entry for output to the logging service. It never blocks.
+func (l *Logger) Log(e Entry) {
+ ent, err := toLogEntry(e)
+ if err != nil {
+ l.error(err)
+ return
+ }
+ if err := l.bundler.Add(ent, proto.Size(ent)); err != nil {
+ l.error(err)
+ }
+}
+
+// Flush blocks until all currently buffered log entries are sent.
+func (l *Logger) Flush() {
+ l.bundler.Flush()
+}
+
+func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) {
+ req := &logpb.WriteLogEntriesRequest{
+ LogName: l.logName,
+ Resource: l.commonResource,
+ Labels: l.commonLabels,
+ Entries: entries,
+ }
+ _, err := l.client.client.WriteLogEntries(ctx, req)
+ if err != nil {
+ l.error(err)
+ }
+}
+
+// error puts the error on the client's error channel
+// without blocking.
+func (l *Logger) error(err error) {
+ select {
+ case l.client.errc <- err:
+ default:
+ }
+}
+
+// StandardLogger returns a *log.Logger for the provided severity.
+//
+// This method is cheap. A single log.Logger is pre-allocated for each
+// severity level in each Logger. Callers may mutate the returned log.Logger
+// (for example by calling SetFlags or SetPrefix).
+func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] }
+
+func trunc32(i int) int32 {
+ if i > math.MaxInt32 {
+ i = math.MaxInt32
+ }
+ return int32(i)
+}
+
+func toLogEntry(e Entry) (*logpb.LogEntry, error) {
+ if e.LogName != "" {
+		return nil, errors.New("logging: Entry.LogName should not be set when writing")
+ }
+ t := e.Timestamp
+ if t.IsZero() {
+ t = now()
+ }
+ ts, err := ptypes.TimestampProto(t)
+ if err != nil {
+ return nil, err
+ }
+ ent := &logpb.LogEntry{
+ Timestamp: ts,
+ Severity: logtypepb.LogSeverity(e.Severity),
+ InsertId: e.InsertID,
+ HttpRequest: fromHTTPRequest(e.HTTPRequest),
+ Operation: e.Operation,
+ Labels: e.Labels,
+ }
+
+ switch p := e.Payload.(type) {
+ case string:
+ ent.Payload = &logpb.LogEntry_TextPayload{p}
+ default:
+ s, err := toProtoStruct(p)
+ if err != nil {
+ return nil, err
+ }
+ ent.Payload = &logpb.LogEntry_JsonPayload{s}
+ }
+ return ent, nil
+}