diff --git a/_tools/src/github.com/jteeuwen/go-bindata/LICENSE b/_tools/src/github.com/jteeuwen/go-bindata/LICENSE
new file mode 100644
index 0000000..c07a931
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/LICENSE
@@ -0,0 +1,3 @@
+This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+license. Its contents can be found at:
+http://creativecommons.org/publicdomain/zero/1.0
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/README.md b/_tools/src/github.com/jteeuwen/go-bindata/README.md
new file mode 100644
index 0000000..1fb4e6d
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/README.md
@@ -0,0 +1,193 @@
+# Warning
+
+**this repository is not maintained. Questions or suggestions can be posted [here](https://github.com/jteeuwen/discussions/issues).**
+
+## bindata
+
+This package converts any file into manageable Go source code. Useful for
+embedding binary data into a go program. The file data is optionally gzip
+compressed before being converted to a raw byte slice.
+
+It comes with a command line tool in the `go-bindata` sub directory.
+This tool offers a set of command line options, used to customize the
+output being generated.
+
+
+### Installation
+
+To install the library and command line program, use the following:
+
+ go get -u github.com/jteeuwen/go-bindata/...
+
+
+### Usage
+
+Conversion is done on one or more sets of files. They are all embedded in a new
+Go source file, along with a table of contents and an `Asset` function,
+which allows quick access to the asset, based on its name.
+
+The simplest invocation generates a `bindata.go` file in the current
+working directory. It includes all assets from the `data` directory.
+
+ $ go-bindata data/
+
+To include all input sub-directories recursively, use the ellipsis postfix
+as defined for Go import paths. Otherwise it will only consider assets in the
+input directory itself.
+
+ $ go-bindata data/...
+
+To specify the name of the output file being generated, we use the following:
+
+ $ go-bindata -o myfile.go data/
+
+Multiple input directories can be specified if necessary.
+
+ $ go-bindata dir1/... /path/to/dir2/... dir3
+
+
+The following paragraphs detail some of the command line options which can be
+supplied to `go-bindata`. Refer to the `testdata/out` directory for various
+output examples from the assets in `testdata/in`. Each example uses different
+command line options.
+
+To ignore files, pass in regexes using -ignore, for example:
+
+ $ go-bindata -ignore=\\.gitignore data/...
+
+### Accessing an asset
+
+To access asset data, we use the `Asset(string) ([]byte, error)` function which
+is included in the generated output.
+
+ data, err := Asset("pub/style/foo.css")
+ if err != nil {
+ // Asset was not found.
+ }
+
+ // use asset data
+
+
+### Debug vs Release builds
+
+When invoking the program with the `-debug` flag, the generated code does
+not actually include the asset data. Instead, it generates function stubs
+which load the data from the original file on disk. The asset API remains
+identical between debug and release builds, so your code will not have to
+change.
+
+This is useful during development when you expect the assets to change often.
+The host application using these assets uses the same API in both cases and
+will not have to care where the actual data comes from.
+
+An example is a Go webserver with some embedded, static web content like
+HTML, JS and CSS files. While developing it, you do not want to rebuild the
+whole server and restart it every time you make a change to a bit of
+javascript. You just want to build and launch the server once. Then just press
+refresh in the browser to see those changes. Embedding the assets with the
+`debug` flag allows you to do just that. When you are finished developing and
+ready for deployment, just re-invoke `go-bindata` without the `-debug` flag.
+It will now embed the latest version of the assets.
+
+
+### Lower memory footprint
+
+Using the `-nomemcopy` flag, will alter the way the output file is generated.
+It will employ a hack that allows us to read the file data directly from
+the compiled program's `.rodata` section. This ensures that when we
+call our generated function, we omit unnecessary memcopies.
+
+The downside of this, is that it requires dependencies on the `reflect` and
+`unsafe` packages. These may be restricted on platforms like AppEngine and
+thus prevent you from using this mode.
+
+Another disadvantage is that the byte slice we create, is strictly read-only.
+For most use-cases this is not a problem, but if you ever try to alter the
+returned byte slice, a runtime panic is thrown. Use this mode only on target
+platforms where memory constraints are an issue.
+
+The default behaviour is to use the old code generation method. This
+prevents the two previously mentioned issues, but will employ at least one
+extra memcopy and thus increase memory requirements.
+
+For instance, consider the following two examples:
+
+This would be the default mode, using an extra memcopy but gives a safe
+implementation without dependencies on `reflect` and `unsafe`:
+
+```go
+func myfile() []byte {
+ return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a}
+}
+```
+
+Here is the same functionality, but uses the `.rodata` hack.
+The byte slice returned from this example can not be written to without
+generating a runtime error.
+
+```go
+var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a"
+
+func myfile() []byte {
+ var empty [0]byte
+ sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile))
+ b := empty[:]
+ bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bx.Data = sx.Data
+ bx.Len = len(_myfile)
+ bx.Cap = bx.Len
+ return b
+}
+```
+
+
+### Optional compression
+
+When the `-nocompress` flag is given, the supplied resource is *not* GZIP
+compressed before being turned into Go code. The data should still be accessed
+through a function call, so nothing changes in the usage of the generated file.
+
+This feature is useful if you do not care for compression, or the supplied
+resource is already compressed. Doing it again would not add any value and may
+even increase the size of the data.
+
+The default behaviour of the program is to use compression.
+
+
+### Path prefix stripping
+
+The keys used in the `_bindata` map, are the same as the input file name
+passed to `go-bindata`. This includes the path. In most cases, this is not
+desirable, as it puts potentially sensitive information in your code base.
+For this purpose, the tool supplies another command line flag `-prefix`.
+This accepts a portion of a path name, which should be stripped off from
+the map keys and function names.
+
+For example, running without the `-prefix` flag, we get:
+
+ $ go-bindata /path/to/templates/
+
+ _bindata["/path/to/templates/foo.html"] = path_to_templates_foo_html
+
+Running with the `-prefix` flag, we get:
+
+ $ go-bindata -prefix "/path/to/" /path/to/templates/
+
+ _bindata["templates/foo.html"] = templates_foo_html
+
+
+### Build tags
+
+With the optional `-tags` flag, you can specify any go build tags that
+must be fulfilled for the output file to be included in a build. This
+is useful when including binary data in multiple formats, where the desired
+format is specified at build time with the appropriate tags.
+
+The tags are appended to a `// +build` line in the beginning of the output file
+and must follow the build tags syntax specified by the go tool.
+
+### Related projects
+
+[go-bindata-assetfs](https://github.com/elazarl/go-bindata-assetfs#readme) -
+implements `http.FileSystem` interface. Allows you to serve assets with `net/http`.
+
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/asset.go b/_tools/src/github.com/jteeuwen/go-bindata/asset.go
new file mode 100644
index 0000000..95b6b94
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/asset.go
@@ -0,0 +1,12 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+// Asset holds information about a single asset to be processed.
+type Asset struct {
+ Path string // Full file path.
+ Name string // Key used in TOC -- name by which asset is referenced.
+ Func string // Function name for the procedure returning the asset contents.
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/bytewriter.go b/_tools/src/github.com/jteeuwen/go-bindata/bytewriter.go
new file mode 100644
index 0000000..05d6d67
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/bytewriter.go
@@ -0,0 +1,44 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+ "fmt"
+ "io"
+)
+
+var (
+ newline = []byte{'\n'}
+ dataindent = []byte{'\t', '\t'}
+ space = []byte{' '}
+)
+
+type ByteWriter struct {
+ io.Writer
+ c int
+}
+
+func (w *ByteWriter) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return
+ }
+
+ for n = range p {
+ if w.c%12 == 0 {
+ w.Writer.Write(newline)
+ w.Writer.Write(dataindent)
+ w.c = 0
+ } else {
+ w.Writer.Write(space)
+ }
+
+ fmt.Fprintf(w.Writer, "0x%02x,", p[n])
+ w.c++
+ }
+
+ n++
+
+ return
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/config.go b/_tools/src/github.com/jteeuwen/go-bindata/config.go
new file mode 100644
index 0000000..2bd0d56
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/config.go
@@ -0,0 +1,203 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+)
+
+// InputConfig defines options on an asset directory to be converted.
+type InputConfig struct {
+ // Path defines a directory containing asset files to be included
+ // in the generated output.
+ Path string
+
+	// Recursive defines whether subdirectories of Path
+ // should be recursively included in the conversion.
+ Recursive bool
+}
+
+// Config defines a set of options for the asset conversion.
+type Config struct {
+ // Name of the package to use. Defaults to 'main'.
+ Package string
+
+ // Tags specify a set of optional build tags, which should be
+ // included in the generated output. The tags are appended to a
+ // `// +build` line in the beginning of the output file
+ // and must follow the build tags syntax specified by the go tool.
+ Tags string
+
+ // Input defines the directory path, containing all asset files as
+ // well as whether to recursively process assets in any sub directories.
+ Input []InputConfig
+
+ // Output defines the output file for the generated code.
+ // If left empty, this defaults to 'bindata.go' in the current
+ // working directory.
+ Output string
+
+ // Prefix defines a path prefix which should be stripped from all
+ // file names when generating the keys in the table of contents.
+ // For example, running without the `-prefix` flag, we get:
+ //
+ // $ go-bindata /path/to/templates
+ // go_bindata["/path/to/templates/foo.html"] = _path_to_templates_foo_html
+ //
+ // Running with the `-prefix` flag, we get:
+ //
+ // $ go-bindata -prefix "/path/to/" /path/to/templates/foo.html
+ // go_bindata["templates/foo.html"] = templates_foo_html
+ Prefix string
+
+ // NoMemCopy will alter the way the output file is generated.
+ //
+ // It will employ a hack that allows us to read the file data directly from
+	// the compiled program's `.rodata` section. This ensures that when we
+	// call our generated function, we omit unnecessary mem copies.
+ //
+ // The downside of this, is that it requires dependencies on the `reflect` and
+ // `unsafe` packages. These may be restricted on platforms like AppEngine and
+ // thus prevent you from using this mode.
+ //
+ // Another disadvantage is that the byte slice we create, is strictly read-only.
+ // For most use-cases this is not a problem, but if you ever try to alter the
+ // returned byte slice, a runtime panic is thrown. Use this mode only on target
+ // platforms where memory constraints are an issue.
+ //
+ // The default behaviour is to use the old code generation method. This
+ // prevents the two previously mentioned issues, but will employ at least one
+ // extra memcopy and thus increase memory requirements.
+ //
+ // For instance, consider the following two examples:
+ //
+ // This would be the default mode, using an extra memcopy but gives a safe
+ // implementation without dependencies on `reflect` and `unsafe`:
+ //
+ // func myfile() []byte {
+ // return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a}
+ // }
+ //
+ // Here is the same functionality, but uses the `.rodata` hack.
+ // The byte slice returned from this example can not be written to without
+ // generating a runtime error.
+ //
+ // var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a"
+ //
+ // func myfile() []byte {
+ // var empty [0]byte
+ // sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile))
+ // b := empty[:]
+ // bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ // bx.Data = sx.Data
+ // bx.Len = len(_myfile)
+ // bx.Cap = bx.Len
+ // return b
+ // }
+ NoMemCopy bool
+
+ // NoCompress means the assets are /not/ GZIP compressed before being turned
+ // into Go code. The generated function will automatically unzip
+ // the file data when called. Defaults to false.
+ NoCompress bool
+
+ // Perform a debug build. This generates an asset file, which
+ // loads the asset contents directly from disk at their original
+ // location, instead of embedding the contents in the code.
+ //
+ // This is mostly useful if you anticipate that the assets are
+ // going to change during your development cycle. You will always
+ // want your code to access the latest version of the asset.
+ // Only in release mode, will the assets actually be embedded
+ // in the code. The default behaviour is Release mode.
+ Debug bool
+
+ // Perform a dev build, which is nearly identical to the debug option. The
+ // only difference is that instead of absolute file paths in generated code,
+ // it expects a variable, `rootDir`, to be set in the generated code's
+ // package (the author needs to do this manually), which it then prepends to
+ // an asset's name to construct the file path on disk.
+ //
+ // This is mainly so you can push the generated code file to a shared
+ // repository.
+ Dev bool
+
+ // When true, size, mode and modtime are not preserved from files
+ NoMetadata bool
+ // When nonzero, use this as mode for all files.
+ Mode uint
+ // When nonzero, use this as unix timestamp for all files.
+ ModTime int64
+
+ // Ignores any filenames matching the regex pattern specified, e.g.
+ // path/to/file.ext will ignore only that file, or \\.gitignore
+ // will match any .gitignore file.
+ //
+ // This parameter can be provided multiple times.
+ Ignore []*regexp.Regexp
+}
+
+// NewConfig returns a default configuration struct.
+func NewConfig() *Config {
+ c := new(Config)
+ c.Package = "main"
+ c.NoMemCopy = false
+ c.NoCompress = false
+ c.Debug = false
+ c.Output = "./bindata.go"
+ c.Ignore = make([]*regexp.Regexp, 0)
+ return c
+}
+
+// validate ensures the config has sane values.
+// Part of which means checking if certain file/directory paths exist.
+func (c *Config) validate() error {
+ if len(c.Package) == 0 {
+ return fmt.Errorf("Missing package name")
+ }
+
+ for _, input := range c.Input {
+ _, err := os.Lstat(input.Path)
+ if err != nil {
+ return fmt.Errorf("Failed to stat input path '%s': %v", input.Path, err)
+ }
+ }
+
+ if len(c.Output) == 0 {
+ cwd, err := os.Getwd()
+ if err != nil {
+ return fmt.Errorf("Unable to determine current working directory.")
+ }
+
+ c.Output = filepath.Join(cwd, "bindata.go")
+ }
+
+ stat, err := os.Lstat(c.Output)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return fmt.Errorf("Output path: %v", err)
+ }
+
+ // File does not exist. This is fine, just make
+ // sure the directory it is to be in exists.
+ dir, _ := filepath.Split(c.Output)
+ if dir != "" {
+ err = os.MkdirAll(dir, 0744)
+
+ if err != nil {
+ return fmt.Errorf("Create output directory: %v", err)
+ }
+ }
+ }
+
+ if stat != nil && stat.IsDir() {
+ return fmt.Errorf("Output path is a directory.")
+ }
+
+ return nil
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/convert.go b/_tools/src/github.com/jteeuwen/go-bindata/convert.go
new file mode 100644
index 0000000..cf0466e
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/convert.go
@@ -0,0 +1,261 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode"
+)
+
+// Translate reads assets from an input directory, converts them
+// to Go code and writes new files to the output specified
+// in the given configuration.
+func Translate(c *Config) error {
+ var toc []Asset
+
+ // Ensure our configuration has sane values.
+ err := c.validate()
+ if err != nil {
+ return err
+ }
+
+ var knownFuncs = make(map[string]int)
+ var visitedPaths = make(map[string]bool)
+ // Locate all the assets.
+ for _, input := range c.Input {
+ err = findFiles(input.Path, c.Prefix, input.Recursive, &toc, c.Ignore, knownFuncs, visitedPaths)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Create output file.
+ fd, err := os.Create(c.Output)
+ if err != nil {
+ return err
+ }
+
+ defer fd.Close()
+
+ // Create a buffered writer for better performance.
+ bfd := bufio.NewWriter(fd)
+ defer bfd.Flush()
+
+ // Write the header. This makes e.g. Github ignore diffs in generated files.
+ if _, err = fmt.Fprint(bfd, "// Code generated by go-bindata.\n"); err != nil {
+ return err
+ }
+ if _, err = fmt.Fprint(bfd, "// sources:\n"); err != nil {
+ return err
+ }
+
+ wd, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+
+ for _, asset := range toc {
+ relative, _ := filepath.Rel(wd, asset.Path)
+ if _, err = fmt.Fprintf(bfd, "// %s\n", filepath.ToSlash(relative)); err != nil {
+ return err
+ }
+ }
+ if _, err = fmt.Fprint(bfd, "// DO NOT EDIT!\n\n"); err != nil {
+ return err
+ }
+
+ // Write build tags, if applicable.
+ if len(c.Tags) > 0 {
+ if _, err = fmt.Fprintf(bfd, "// +build %s\n\n", c.Tags); err != nil {
+ return err
+ }
+ }
+
+ // Write package declaration.
+ _, err = fmt.Fprintf(bfd, "package %s\n\n", c.Package)
+ if err != nil {
+ return err
+ }
+
+ // Write assets.
+ if c.Debug || c.Dev {
+ err = writeDebug(bfd, c, toc)
+ } else {
+ err = writeRelease(bfd, c, toc)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ // Write table of contents
+ if err := writeTOC(bfd, toc); err != nil {
+ return err
+ }
+ // Write hierarchical tree of assets
+ if err := writeTOCTree(bfd, toc); err != nil {
+ return err
+ }
+
+ // Write restore procedure
+ return writeRestore(bfd)
+}
+
+// Implement sort.Interface for []os.FileInfo based on Name()
+type ByName []os.FileInfo
+
+func (v ByName) Len() int { return len(v) }
+func (v ByName) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
+func (v ByName) Less(i, j int) bool { return v[i].Name() < v[j].Name() }
+
+// findFiles recursively finds all the file paths in the given directory tree.
+// They are added to the given map as keys. Values will be safe function names
+// for each file, which will be used when generating the output code.
+func findFiles(dir, prefix string, recursive bool, toc *[]Asset, ignore []*regexp.Regexp, knownFuncs map[string]int, visitedPaths map[string]bool) error {
+ dirpath := dir
+ if len(prefix) > 0 {
+ dirpath, _ = filepath.Abs(dirpath)
+ prefix, _ = filepath.Abs(prefix)
+ prefix = filepath.ToSlash(prefix)
+ }
+
+ fi, err := os.Stat(dirpath)
+ if err != nil {
+ return err
+ }
+
+ var list []os.FileInfo
+
+ if !fi.IsDir() {
+ dirpath = filepath.Dir(dirpath)
+ list = []os.FileInfo{fi}
+ } else {
+ visitedPaths[dirpath] = true
+ fd, err := os.Open(dirpath)
+ if err != nil {
+ return err
+ }
+
+ defer fd.Close()
+
+ list, err = fd.Readdir(0)
+ if err != nil {
+ return err
+ }
+
+ // Sort to make output stable between invocations
+ sort.Sort(ByName(list))
+ }
+
+ for _, file := range list {
+ var asset Asset
+ asset.Path = filepath.Join(dirpath, file.Name())
+ asset.Name = filepath.ToSlash(asset.Path)
+
+ ignoring := false
+ for _, re := range ignore {
+ if re.MatchString(asset.Path) {
+ ignoring = true
+ break
+ }
+ }
+ if ignoring {
+ continue
+ }
+
+ if file.IsDir() {
+ if recursive {
+ recursivePath := filepath.Join(dir, file.Name())
+ visitedPaths[asset.Path] = true
+ findFiles(recursivePath, prefix, recursive, toc, ignore, knownFuncs, visitedPaths)
+ }
+ continue
+ } else if file.Mode()&os.ModeSymlink == os.ModeSymlink {
+ var linkPath string
+ if linkPath, err = os.Readlink(asset.Path); err != nil {
+ return err
+ }
+ if !filepath.IsAbs(linkPath) {
+ if linkPath, err = filepath.Abs(dirpath + "/" + linkPath); err != nil {
+ return err
+ }
+ }
+ if _, ok := visitedPaths[linkPath]; !ok {
+ visitedPaths[linkPath] = true
+ findFiles(asset.Path, prefix, recursive, toc, ignore, knownFuncs, visitedPaths)
+ }
+ continue
+ }
+
+ if strings.HasPrefix(asset.Name, prefix) {
+ asset.Name = asset.Name[len(prefix):]
+ } else {
+ asset.Name = filepath.Join(dir, file.Name())
+ }
+
+ // If we have a leading slash, get rid of it.
+ if len(asset.Name) > 0 && asset.Name[0] == '/' {
+ asset.Name = asset.Name[1:]
+ }
+
+ // This shouldn't happen.
+ if len(asset.Name) == 0 {
+ return fmt.Errorf("Invalid file: %v", asset.Path)
+ }
+
+ asset.Func = safeFunctionName(asset.Name, knownFuncs)
+ asset.Path, _ = filepath.Abs(asset.Path)
+ *toc = append(*toc, asset)
+ }
+
+ return nil
+}
+
+var regFuncName = regexp.MustCompile(`[^a-zA-Z0-9_]`)
+
+// safeFunctionName converts the given name into a name
+// which qualifies as a valid function identifier. It
+// also compares against a known list of functions to
+// prevent conflict based on name translation.
+func safeFunctionName(name string, knownFuncs map[string]int) string {
+ var inBytes, outBytes []byte
+ var toUpper bool
+
+ name = strings.ToLower(name)
+ inBytes = []byte(name)
+
+ for i := 0; i < len(inBytes); i++ {
+ if regFuncName.Match([]byte{inBytes[i]}) {
+ toUpper = true
+ } else if toUpper {
+ outBytes = append(outBytes, []byte(strings.ToUpper(string(inBytes[i])))...)
+ toUpper = false
+ } else {
+ outBytes = append(outBytes, inBytes[i])
+ }
+ }
+
+ name = string(outBytes)
+
+ // Identifier can't start with a digit.
+ if unicode.IsDigit(rune(name[0])) {
+ name = "_" + name
+ }
+
+ if num, ok := knownFuncs[name]; ok {
+ knownFuncs[name] = num + 1
+ name = fmt.Sprintf("%s%d", name, num)
+ } else {
+ knownFuncs[name] = 2
+ }
+
+ return name
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/debug.go b/_tools/src/github.com/jteeuwen/go-bindata/debug.go
new file mode 100644
index 0000000..09fee78
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/debug.go
@@ -0,0 +1,87 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+ "fmt"
+ "io"
+)
+
+// writeDebug writes the debug code file.
+func writeDebug(w io.Writer, c *Config, toc []Asset) error {
+ err := writeDebugHeader(w)
+ if err != nil {
+ return err
+ }
+
+ for i := range toc {
+ err = writeDebugAsset(w, c, &toc[i])
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeDebugHeader writes output file headers.
+// This targets debug builds.
+func writeDebugHeader(w io.Writer) error {
+ _, err := fmt.Fprintf(w, `import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// bindataRead reads the given file from disk. It returns an error on failure.
+func bindataRead(path, name string) ([]byte, error) {
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ err = fmt.Errorf("Error reading asset %%s at %%s: %%v", name, path, err)
+ }
+ return buf, err
+}
+
+type asset struct {
+ bytes []byte
+ info os.FileInfo
+}
+
+`)
+ return err
+}
+
+// writeDebugAsset write a debug entry for the given asset.
+// A debug entry is simply a function which reads the asset from
+// the original file (e.g.: from disk).
+func writeDebugAsset(w io.Writer, c *Config, asset *Asset) error {
+ pathExpr := fmt.Sprintf("%q", asset.Path)
+ if c.Dev {
+ pathExpr = fmt.Sprintf("filepath.Join(rootDir, %q)", asset.Name)
+ }
+
+ _, err := fmt.Fprintf(w, `// %s reads file data from disk. It returns an error on failure.
+func %s() (*asset, error) {
+ path := %s
+ name := %q
+ bytes, err := bindataRead(path, name)
+ if err != nil {
+ return nil, err
+ }
+
+ fi, err := os.Stat(path)
+ if err != nil {
+ err = fmt.Errorf("Error reading asset info %%s at %%s: %%v", name, path, err)
+ }
+
+ a := &asset{bytes: bytes, info: fi}
+ return a, err
+}
+
+`, asset.Func, asset.Func, pathExpr, asset.Name)
+ return err
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/doc.go b/_tools/src/github.com/jteeuwen/go-bindata/doc.go
new file mode 100644
index 0000000..09ead1e
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/doc.go
@@ -0,0 +1,129 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+/*
+bindata converts any file into manageable Go source code. Useful for
+embedding binary data into a go program. The file data is optionally gzip
+compressed before being converted to a raw byte slice.
+
+The following paragraphs cover some of the customization options
+which can be specified in the Config struct, which must be passed into
+the Translate() call.
+
+
+Debug vs Release builds
+
+When used with the `Debug` option, the generated code does not actually include
+the asset data. Instead, it generates function stubs which load the data from
+the original file on disk. The asset API remains identical between debug and
+release builds, so your code will not have to change.
+
+This is useful during development when you expect the assets to change often.
+The host application using these assets uses the same API in both cases and
+will not have to care where the actual data comes from.
+
+An example is a Go webserver with some embedded, static web content like
+HTML, JS and CSS files. While developing it, you do not want to rebuild the
+whole server and restart it every time you make a change to a bit of
+javascript. You just want to build and launch the server once. Then just press
+refresh in the browser to see those changes. Embedding the assets with the
+`debug` flag allows you to do just that. When you are finished developing and
+ready for deployment, just re-invoke `go-bindata` without the `-debug` flag.
+It will now embed the latest version of the assets.
+
+
+Lower memory footprint
+
+The `NoMemCopy` option will alter the way the output file is generated.
+It will employ a hack that allows us to read the file data directly from
+the compiled program's `.rodata` section. This ensures that when we
+call our generated function, we omit unnecessary memcopies.
+
+The downside of this, is that it requires dependencies on the `reflect` and
+`unsafe` packages. These may be restricted on platforms like AppEngine and
+thus prevent you from using this mode.
+
+Another disadvantage is that the byte slice we create, is strictly read-only.
+For most use-cases this is not a problem, but if you ever try to alter the
+returned byte slice, a runtime panic is thrown. Use this mode only on target
+platforms where memory constraints are an issue.
+
+The default behaviour is to use the old code generation method. This
+prevents the two previously mentioned issues, but will employ at least one
+extra memcopy and thus increase memory requirements.
+
+For instance, consider the following two examples:
+
+This would be the default mode, using an extra memcopy but gives a safe
+implementation without dependencies on `reflect` and `unsafe`:
+
+ func myfile() []byte {
+ return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a}
+ }
+
+Here is the same functionality, but uses the `.rodata` hack.
+The byte slice returned from this example can not be written to without
+generating a runtime error.
+
+ var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a"
+
+ func myfile() []byte {
+ var empty [0]byte
+ sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile))
+ b := empty[:]
+ bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bx.Data = sx.Data
+ bx.Len = len(_myfile)
+ bx.Cap = bx.Len
+ return b
+ }
+
+
+Optional compression
+
+The NoCompress option indicates that the supplied assets are *not* GZIP
+compressed before being turned into Go code. The data should still be accessed
+through a function call, so nothing changes in the API.
+
+This feature is useful if you do not care for compression, or the supplied
+resource is already compressed. Doing it again would not add any value and may
+even increase the size of the data.
+
+The default behaviour of the program is to use compression.
+
+
+Path prefix stripping
+
+The keys used in the `_bindata` map are the same as the input file name
+passed to `go-bindata`. This includes the path. In most cases, this is not
+desirable, as it puts potentially sensitive information in your code base.
+For this purpose, the tool supplies another command line flag `-prefix`.
+This accepts a portion of a path name, which should be stripped off from
+the map keys and function names.
+
+For example, running without the `-prefix` flag, we get:
+
+ $ go-bindata /path/to/templates/
+
+ _bindata["/path/to/templates/foo.html"] = path_to_templates_foo_html
+
+Running with the `-prefix` flag, we get:
+
+ $ go-bindata -prefix "/path/to/" /path/to/templates/
+
+ _bindata["templates/foo.html"] = templates_foo_html
+
+
+Build tags
+
+With the optional Tags field, you can specify any go build tags that
+must be fulfilled for the output file to be included in a build. This
+is useful when including binary data in multiple formats, where the desired
+format is specified at build time with the appropriate tags.
+
+The tags are appended to a `// +build` line in the beginning of the output file
+and must follow the build tags syntax specified by the go tool.
+
+*/
+package bindata
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/go-bindata/AppendSliceValue.go b/_tools/src/github.com/jteeuwen/go-bindata/go-bindata/AppendSliceValue.go
new file mode 100644
index 0000000..f5da495
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/go-bindata/AppendSliceValue.go
@@ -0,0 +1,22 @@
+package main
+
+import "strings"
+
+// borrowed from https://github.com/hashicorp/serf/blob/master/command/agent/flag_slice_value.go
+
+// AppendSliceValue implements the flag.Value interface and allows multiple
+// calls to the same variable to append a list.
+type AppendSliceValue []string
+
+func (s *AppendSliceValue) String() string {
+ return strings.Join(*s, ",")
+}
+
+func (s *AppendSliceValue) Set(value string) error {
+ if *s == nil {
+ *s = make([]string, 0, 1)
+ }
+
+ *s = append(*s, value)
+ return nil
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/go-bindata/main.go b/_tools/src/github.com/jteeuwen/go-bindata/go-bindata/main.go
new file mode 100644
index 0000000..503a059
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/go-bindata/main.go
@@ -0,0 +1,107 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/jteeuwen/go-bindata"
+)
+
+func main() {
+ cfg := parseArgs()
+ err := bindata.Translate(cfg)
+
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "bindata: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+// parseArgs creates a new, filled configuration instance
+// by reading and parsing command line options.
+//
+// This function exits the program with an error, if
+// any of the command line options are incorrect.
+func parseArgs() *bindata.Config {
+ var version bool
+
+ c := bindata.NewConfig()
+
+ flag.Usage = func() {
+ fmt.Printf("Usage: %s [options] <input directories>\n\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+
+ flag.BoolVar(&c.Debug, "debug", c.Debug, "Do not embed the assets, but provide the embedding API. Contents will still be loaded from disk.")
+ flag.BoolVar(&c.Dev, "dev", c.Dev, "Similar to debug, but does not emit absolute paths. Expects a rootDir variable to already exist in the generated code's package.")
+ flag.StringVar(&c.Tags, "tags", c.Tags, "Optional set of build tags to include.")
+ flag.StringVar(&c.Prefix, "prefix", c.Prefix, "Optional path prefix to strip off asset names.")
+ flag.StringVar(&c.Package, "pkg", c.Package, "Package name to use in the generated code.")
+ flag.BoolVar(&c.NoMemCopy, "nomemcopy", c.NoMemCopy, "Use a .rodata hack to get rid of unnecessary memcopies. Refer to the documentation to see what implications this carries.")
+ flag.BoolVar(&c.NoCompress, "nocompress", c.NoCompress, "Assets will *not* be GZIP compressed when this flag is specified.")
+ flag.BoolVar(&c.NoMetadata, "nometadata", c.NoMetadata, "Assets will not preserve size, mode, and modtime info.")
+ flag.UintVar(&c.Mode, "mode", c.Mode, "Optional file mode override for all files.")
+ flag.Int64Var(&c.ModTime, "modtime", c.ModTime, "Optional modification unix timestamp override for all files.")
+ flag.StringVar(&c.Output, "o", c.Output, "Optional name of the output file to be generated.")
+ flag.BoolVar(&version, "version", false, "Displays version information.")
+
+ ignore := make([]string, 0)
+ flag.Var((*AppendSliceValue)(&ignore), "ignore", "Regex pattern to ignore")
+
+ flag.Parse()
+
+ patterns := make([]*regexp.Regexp, 0)
+ for _, pattern := range ignore {
+ patterns = append(patterns, regexp.MustCompile(pattern))
+ }
+ c.Ignore = patterns
+
+ if version {
+ fmt.Printf("%s\n", Version())
+ os.Exit(0)
+ }
+
+ // Make sure we have input paths.
+ if flag.NArg() == 0 {
+ fmt.Fprintf(os.Stderr, "Missing <input dir>\n\n")
+ flag.Usage()
+ os.Exit(1)
+ }
+
+ // Create input configurations.
+ c.Input = make([]bindata.InputConfig, flag.NArg())
+ for i := range c.Input {
+ c.Input[i] = parseInput(flag.Arg(i))
+ }
+
+ return c
+}
+
+// parseInput determines whether the given path has a recursive indicator and
+// returns a new path with the recursive indicator chopped off if it does.
+//
+// ex:
+// /path/to/foo/... -> (/path/to/foo, true)
+// /path/to/bar -> (/path/to/bar, false)
+func parseInput(path string) bindata.InputConfig {
+ if strings.HasSuffix(path, "/...") {
+ return bindata.InputConfig{
+ Path: filepath.Clean(path[:len(path)-4]),
+ Recursive: true,
+ }
+ } else {
+ return bindata.InputConfig{
+ Path: filepath.Clean(path),
+ Recursive: false,
+ }
+ }
+
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/go-bindata/version.go b/_tools/src/github.com/jteeuwen/go-bindata/go-bindata/version.go
new file mode 100644
index 0000000..a12508e
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/go-bindata/version.go
@@ -0,0 +1,31 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package main
+
+import (
+ "fmt"
+ "runtime"
+)
+
+const (
+ AppName = "go-bindata"
+ AppVersionMajor = 3
+ AppVersionMinor = 1
+)
+
+// revision part of the program version.
+// This will be set automatically at build time like so:
+//
+// go build -ldflags "-X main.AppVersionRev `date -u +%s`"
+var AppVersionRev string
+
+func Version() string {
+ if len(AppVersionRev) == 0 {
+ AppVersionRev = "0"
+ }
+
+ return fmt.Sprintf("%s %d.%d.%s (Go runtime %s).\nCopyright (c) 2010-2013, Jim Teeuwen.",
+ AppName, AppVersionMajor, AppVersionMinor, AppVersionRev, runtime.Version())
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/release.go b/_tools/src/github.com/jteeuwen/go-bindata/release.go
new file mode 100644
index 0000000..6aefeb2
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/release.go
@@ -0,0 +1,387 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "unicode/utf8"
+)
+
+// writeRelease writes the release code file.
+func writeRelease(w io.Writer, c *Config, toc []Asset) error {
+ err := writeReleaseHeader(w, c)
+ if err != nil {
+ return err
+ }
+
+ for i := range toc {
+ err = writeReleaseAsset(w, c, &toc[i])
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeReleaseHeader writes output file headers.
+// This targets release builds.
+func writeReleaseHeader(w io.Writer, c *Config) error {
+ var err error
+ if c.NoCompress {
+ if c.NoMemCopy {
+ err = header_uncompressed_nomemcopy(w)
+ } else {
+ err = header_uncompressed_memcopy(w)
+ }
+ } else {
+ if c.NoMemCopy {
+ err = header_compressed_nomemcopy(w)
+ } else {
+ err = header_compressed_memcopy(w)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ return header_release_common(w)
+}
+
+// writeReleaseAsset writes a release entry for the given asset.
+// A release entry is a function which embeds and returns
+// the file's byte content.
+func writeReleaseAsset(w io.Writer, c *Config, asset *Asset) error {
+ fd, err := os.Open(asset.Path)
+ if err != nil {
+ return err
+ }
+
+ defer fd.Close()
+
+ if c.NoCompress {
+ if c.NoMemCopy {
+ err = uncompressed_nomemcopy(w, asset, fd)
+ } else {
+ err = uncompressed_memcopy(w, asset, fd)
+ }
+ } else {
+ if c.NoMemCopy {
+ err = compressed_nomemcopy(w, asset, fd)
+ } else {
+ err = compressed_memcopy(w, asset, fd)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ return asset_release_common(w, c, asset)
+}
+
+// sanitize prepares a valid UTF-8 string as a raw string constant.
+// Based on https://code.google.com/p/go/source/browse/godoc/static/makestatic.go?repo=tools
+func sanitize(b []byte) []byte {
+ // Replace ` with `+"`"+`
+ b = bytes.Replace(b, []byte("`"), []byte("`+\"`\"+`"), -1)
+
+ // Replace BOM with `+"\xEF\xBB\xBF"+`
+ // (A BOM is valid UTF-8 but not permitted in Go source files.
+ // I wouldn't bother handling this, but for some insane reason
+ // jquery.js has a BOM somewhere in the middle.)
+ return bytes.Replace(b, []byte("\xEF\xBB\xBF"), []byte("`+\"\\xEF\\xBB\\xBF\"+`"), -1)
+}
+
+func header_compressed_nomemcopy(w io.Writer) error {
+ _, err := fmt.Fprintf(w, `import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+func bindataRead(data, name string) ([]byte, error) {
+ gz, err := gzip.NewReader(strings.NewReader(data))
+ if err != nil {
+ return nil, fmt.Errorf("Read %%q: %%v", name, err)
+ }
+
+ var buf bytes.Buffer
+ _, err = io.Copy(&buf, gz)
+ clErr := gz.Close()
+
+ if err != nil {
+ return nil, fmt.Errorf("Read %%q: %%v", name, err)
+ }
+ if clErr != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+`)
+ return err
+}
+
+func header_compressed_memcopy(w io.Writer) error {
+ _, err := fmt.Fprintf(w, `import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+func bindataRead(data []byte, name string) ([]byte, error) {
+ gz, err := gzip.NewReader(bytes.NewBuffer(data))
+ if err != nil {
+ return nil, fmt.Errorf("Read %%q: %%v", name, err)
+ }
+
+ var buf bytes.Buffer
+ _, err = io.Copy(&buf, gz)
+ clErr := gz.Close()
+
+ if err != nil {
+ return nil, fmt.Errorf("Read %%q: %%v", name, err)
+ }
+ if clErr != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
+
+`)
+ return err
+}
+
+func header_uncompressed_nomemcopy(w io.Writer) error {
+ _, err := fmt.Fprintf(w, `import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+ "unsafe"
+)
+
+func bindataRead(data, name string) ([]byte, error) {
+ var empty [0]byte
+ sx := (*reflect.StringHeader)(unsafe.Pointer(&data))
+ b := empty[:]
+ bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ bx.Data = sx.Data
+ bx.Len = len(data)
+ bx.Cap = bx.Len
+ return b, nil
+}
+
+`)
+ return err
+}
+
+func header_uncompressed_memcopy(w io.Writer) error {
+ _, err := fmt.Fprintf(w, `import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+`)
+ return err
+}
+
+func header_release_common(w io.Writer) error {
+ _, err := fmt.Fprintf(w, `type asset struct {
+ bytes []byte
+ info os.FileInfo
+}
+
+type bindataFileInfo struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+}
+
+func (fi bindataFileInfo) Name() string {
+ return fi.name
+}
+func (fi bindataFileInfo) Size() int64 {
+ return fi.size
+}
+func (fi bindataFileInfo) Mode() os.FileMode {
+ return fi.mode
+}
+func (fi bindataFileInfo) ModTime() time.Time {
+ return fi.modTime
+}
+func (fi bindataFileInfo) IsDir() bool {
+ return false
+}
+func (fi bindataFileInfo) Sys() interface{} {
+ return nil
+}
+
+`)
+ return err
+}
+
+func compressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {
+ _, err := fmt.Fprintf(w, `var _%s = "`, asset.Func)
+ if err != nil {
+ return err
+ }
+
+ gz := gzip.NewWriter(&StringWriter{Writer: w})
+ _, err = io.Copy(gz, r)
+ gz.Close()
+
+ if err != nil {
+ return err
+ }
+
+ _, err = fmt.Fprintf(w, `"
+
+func %sBytes() ([]byte, error) {
+ return bindataRead(
+ _%s,
+ %q,
+ )
+}
+
+`, asset.Func, asset.Func, asset.Name)
+ return err
+}
+
+func compressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
+ _, err := fmt.Fprintf(w, `var _%s = []byte("`, asset.Func)
+ if err != nil {
+ return err
+ }
+
+ gz := gzip.NewWriter(&StringWriter{Writer: w})
+ _, err = io.Copy(gz, r)
+ gz.Close()
+
+ if err != nil {
+ return err
+ }
+
+ _, err = fmt.Fprintf(w, `")
+
+func %sBytes() ([]byte, error) {
+ return bindataRead(
+ _%s,
+ %q,
+ )
+}
+
+`, asset.Func, asset.Func, asset.Name)
+ return err
+}
+
+func uncompressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {
+ _, err := fmt.Fprintf(w, `var _%s = "`, asset.Func)
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(&StringWriter{Writer: w}, r)
+ if err != nil {
+ return err
+ }
+
+ _, err = fmt.Fprintf(w, `"
+
+func %sBytes() ([]byte, error) {
+ return bindataRead(
+ _%s,
+ %q,
+ )
+}
+
+`, asset.Func, asset.Func, asset.Name)
+ return err
+}
+
+func uncompressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
+ _, err := fmt.Fprintf(w, `var _%s = []byte(`, asset.Func)
+ if err != nil {
+ return err
+ }
+
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return err
+ }
+ if utf8.Valid(b) && !bytes.Contains(b, []byte{0}) {
+ fmt.Fprintf(w, "`%s`", sanitize(b))
+ } else {
+ fmt.Fprintf(w, "%+q", b)
+ }
+
+ _, err = fmt.Fprintf(w, `)
+
+func %sBytes() ([]byte, error) {
+ return _%s, nil
+}
+
+`, asset.Func, asset.Func)
+ return err
+}
+
+func asset_release_common(w io.Writer, c *Config, asset *Asset) error {
+ fi, err := os.Stat(asset.Path)
+ if err != nil {
+ return err
+ }
+
+ mode := uint(fi.Mode())
+ modTime := fi.ModTime().Unix()
+ size := fi.Size()
+ if c.NoMetadata {
+ mode = 0
+ modTime = 0
+ size = 0
+ }
+ if c.Mode > 0 {
+ mode = uint(os.ModePerm) & c.Mode
+ }
+ if c.ModTime > 0 {
+ modTime = c.ModTime
+ }
+ _, err = fmt.Fprintf(w, `func %s() (*asset, error) {
+ bytes, err := %sBytes()
+ if err != nil {
+ return nil, err
+ }
+
+ info := bindataFileInfo{name: %q, size: %d, mode: os.FileMode(%d), modTime: time.Unix(%d, 0)}
+ a := &asset{bytes: bytes, info: info}
+ return a, nil
+}
+
+`, asset.Func, asset.Func, asset.Name, size, mode, modTime)
+ return err
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/restore.go b/_tools/src/github.com/jteeuwen/go-bindata/restore.go
new file mode 100644
index 0000000..65db0e8
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/restore.go
@@ -0,0 +1,63 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+ "fmt"
+ "io"
+)
+
+func writeRestore(w io.Writer) error {
+ _, err := fmt.Fprintf(w, `
+// RestoreAsset restores an asset under the given directory
+func RestoreAsset(dir, name string) error {
+ data, err := Asset(name)
+ if err != nil {
+ return err
+ }
+ info, err := AssetInfo(name)
+ if err != nil {
+ return err
+ }
+ err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+ if err != nil {
+ return err
+ }
+ err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RestoreAssets restores an asset under the given directory recursively
+func RestoreAssets(dir, name string) error {
+ children, err := AssetDir(name)
+ // File
+ if err != nil {
+ return RestoreAsset(dir, name)
+ }
+ // Dir
+ for _, child := range children {
+ err = RestoreAssets(dir, filepath.Join(name, child))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func _filePath(dir, name string) string {
+ cannonicalName := strings.Replace(name, "\\", "/", -1)
+ return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
+}
+
+`)
+ return err
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/stringwriter.go b/_tools/src/github.com/jteeuwen/go-bindata/stringwriter.go
new file mode 100644
index 0000000..77daa04
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/stringwriter.go
@@ -0,0 +1,36 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+ "io"
+)
+
+const lowerHex = "0123456789abcdef"
+
+type StringWriter struct {
+ io.Writer
+ c int
+}
+
+func (w *StringWriter) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return
+ }
+
+ buf := []byte(`\x00`)
+ var b byte
+
+ for n, b = range p {
+ buf[2] = lowerHex[b/16]
+ buf[3] = lowerHex[b%16]
+ w.Writer.Write(buf)
+ w.c++
+ }
+
+ n++
+
+ return
+}
diff --git a/_tools/src/github.com/jteeuwen/go-bindata/toc.go b/_tools/src/github.com/jteeuwen/go-bindata/toc.go
new file mode 100644
index 0000000..9ec410b
--- /dev/null
+++ b/_tools/src/github.com/jteeuwen/go-bindata/toc.go
@@ -0,0 +1,230 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+)
+
+type assetTree struct {
+ Asset Asset
+ Children map[string]*assetTree
+}
+
+func newAssetTree() *assetTree {
+ tree := &assetTree{}
+ tree.Children = make(map[string]*assetTree)
+ return tree
+}
+
+func (node *assetTree) child(name string) *assetTree {
+ rv, ok := node.Children[name]
+ if !ok {
+ rv = newAssetTree()
+ node.Children[name] = rv
+ }
+ return rv
+}
+
+func (root *assetTree) Add(route []string, asset Asset) {
+ for _, name := range route {
+ root = root.child(name)
+ }
+ root.Asset = asset
+}
+
+func ident(w io.Writer, n int) {
+ for i := 0; i < n; i++ {
+ w.Write([]byte{'\t'})
+ }
+}
+
+func (root *assetTree) funcOrNil() string {
+ if root.Asset.Func == "" {
+ return "nil"
+ } else {
+ return root.Asset.Func
+ }
+}
+
+func (root *assetTree) writeGoMap(w io.Writer, nident int) {
+ fmt.Fprintf(w, "&bintree{%s, map[string]*bintree{", root.funcOrNil())
+
+ if len(root.Children) > 0 {
+ io.WriteString(w, "\n")
+
+ // Sort to make output stable between invocations
+ filenames := make([]string, len(root.Children))
+ i := 0
+ for filename, _ := range root.Children {
+ filenames[i] = filename
+ i++
+ }
+ sort.Strings(filenames)
+
+ for _, p := range filenames {
+ ident(w, nident+1)
+ fmt.Fprintf(w, `"%s": `, p)
+ root.Children[p].writeGoMap(w, nident+1)
+ }
+ ident(w, nident)
+ }
+
+ io.WriteString(w, "}}")
+ if nident > 0 {
+ io.WriteString(w, ",")
+ }
+ io.WriteString(w, "\n")
+}
+
+func (root *assetTree) WriteAsGoMap(w io.Writer) error {
+ _, err := fmt.Fprint(w, `type bintree struct {
+ Func func() (*asset, error)
+ Children map[string]*bintree
+}
+var _bintree = `)
+ root.writeGoMap(w, 0)
+ return err
+}
+
+func writeTOCTree(w io.Writer, toc []Asset) error {
+ _, err := fmt.Fprintf(w, `// AssetDir returns the file names below a certain
+// directory embedded in the file by go-bindata.
+// For example if you run go-bindata on data/... and data contains the
+// following hierarchy:
+// data/
+// foo.txt
+// img/
+// a.png
+// b.png
+// then AssetDir("data") would return []string{"foo.txt", "img"}
+// AssetDir("data/img") would return []string{"a.png", "b.png"}
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error
+// AssetDir("") will return []string{"data"}.
+func AssetDir(name string) ([]string, error) {
+ node := _bintree
+ if len(name) != 0 {
+ cannonicalName := strings.Replace(name, "\\", "/", -1)
+ pathList := strings.Split(cannonicalName, "/")
+ for _, p := range pathList {
+ node = node.Children[p]
+ if node == nil {
+ return nil, fmt.Errorf("Asset %%s not found", name)
+ }
+ }
+ }
+ if node.Func != nil {
+ return nil, fmt.Errorf("Asset %%s not found", name)
+ }
+ rv := make([]string, 0, len(node.Children))
+ for childName := range node.Children {
+ rv = append(rv, childName)
+ }
+ return rv, nil
+}
+
+`)
+ if err != nil {
+ return err
+ }
+ tree := newAssetTree()
+ for i := range toc {
+ pathList := strings.Split(toc[i].Name, "/")
+ tree.Add(pathList, toc[i])
+ }
+ return tree.WriteAsGoMap(w)
+}
+
+// writeTOC writes the table of contents file.
+func writeTOC(w io.Writer, toc []Asset) error {
+ err := writeTOCHeader(w)
+ if err != nil {
+ return err
+ }
+
+ for i := range toc {
+ err = writeTOCAsset(w, &toc[i])
+ if err != nil {
+ return err
+ }
+ }
+
+ return writeTOCFooter(w)
+}
+
+// writeTOCHeader writes the table of contents file header.
+func writeTOCHeader(w io.Writer) error {
+ _, err := fmt.Fprintf(w, `// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+ cannonicalName := strings.Replace(name, "\\", "/", -1)
+ if f, ok := _bindata[cannonicalName]; ok {
+ a, err := f()
+ if err != nil {
+ return nil, fmt.Errorf("Asset %%s can't read by error: %%v", name, err)
+ }
+ return a.bytes, nil
+ }
+ return nil, fmt.Errorf("Asset %%s not found", name)
+}
+
+// MustAsset is like Asset but panics when Asset would return an error.
+// It simplifies safe initialization of global variables.
+func MustAsset(name string) []byte {
+ a, err := Asset(name)
+ if err != nil {
+ panic("asset: Asset(" + name + "): " + err.Error())
+ }
+
+ return a
+}
+
+// AssetInfo loads and returns the asset info for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func AssetInfo(name string) (os.FileInfo, error) {
+ cannonicalName := strings.Replace(name, "\\", "/", -1)
+ if f, ok := _bindata[cannonicalName]; ok {
+ a, err := f()
+ if err != nil {
+ return nil, fmt.Errorf("AssetInfo %%s can't read by error: %%v", name, err)
+ }
+ return a.info, nil
+ }
+ return nil, fmt.Errorf("AssetInfo %%s not found", name)
+}
+
+// AssetNames returns the names of the assets.
+func AssetNames() []string {
+ names := make([]string, 0, len(_bindata))
+ for name := range _bindata {
+ names = append(names, name)
+ }
+ return names
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string]func() (*asset, error){
+`)
+ return err
+}
+
+// writeTOCAsset writes a TOC entry for the given asset.
+func writeTOCAsset(w io.Writer, asset *Asset) error {
+ _, err := fmt.Fprintf(w, "\t%q: %s,\n", asset.Name, asset.Func)
+ return err
+}
+
+// writeTOCFooter writes the table of contents file footer.
+func writeTOCFooter(w io.Writer) error {
+ _, err := fmt.Fprintf(w, `}
+
+`)
+ return err
+}
diff --git a/_tools/src/github.com/golang/lint/LICENSE b/_tools/src/golang.org/x/lint/LICENSE
similarity index 100%
rename from _tools/src/github.com/golang/lint/LICENSE
rename to _tools/src/golang.org/x/lint/LICENSE
diff --git a/_tools/src/github.com/golang/lint/README.md b/_tools/src/golang.org/x/lint/README.md
similarity index 75%
rename from _tools/src/github.com/golang/lint/README.md
rename to _tools/src/golang.org/x/lint/README.md
index 3593ddd..487eba7 100644
--- a/_tools/src/github.com/golang/lint/README.md
+++ b/_tools/src/golang.org/x/lint/README.md
@@ -4,9 +4,12 @@ Golint is a linter for Go source code.
## Installation
-Golint requires Go 1.6 or later.
+Golint requires a
+[supported release of Go](https://golang.org/doc/devel/release.html#policy).
- go get -u github.com/golang/lint/golint
+ go get -u golang.org/x/lint/golint
+
+To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting.
## Usage
@@ -41,15 +44,18 @@ Golint makes suggestions for many of the mechanically checkable items listed in
[Effective Go](https://golang.org/doc/effective_go.html) and the
[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments).
-If you find an established style that is frequently violated, and which
-you think golint could statically check,
-[file an issue](https://github.com/golang/lint/issues).
+## Scope
+
+Golint is meant to carry out the stylistic conventions put forth in
+[Effective Go](https://golang.org/doc/effective_go.html) and
+[CodeReviewComments](https://golang.org/wiki/CodeReviewComments).
+Changes that are not aligned with those documents will not be considered.
## Contributions
-Contributions to this project are welcome, though please send mail before
-starting work on anything major. Contributors retain their copyright, so we
-need you to fill out
+Contributions to this project are welcome provided they are [in scope](#scope),
+though please send mail before starting work on anything major.
+Contributors retain their copyright, so we need you to fill out
[a short form](https://developers.google.com/open-source/cla/individual)
before we can accept your contribution.
@@ -57,7 +63,7 @@ before we can accept your contribution.
Add this to your ~/.vimrc:
- set rtp+=$GOPATH/src/github.com/golang/lint/misc/vim
+ set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim
If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
diff --git a/_tools/src/github.com/golang/lint/golint/golint.go b/_tools/src/golang.org/x/lint/golint/golint.go
similarity index 99%
rename from _tools/src/github.com/golang/lint/golint/golint.go
rename to _tools/src/golang.org/x/lint/golint/golint.go
index d8360ad..ac024b6 100644
--- a/_tools/src/github.com/golang/lint/golint/golint.go
+++ b/_tools/src/golang.org/x/lint/golint/golint.go
@@ -16,7 +16,7 @@ import (
"path/filepath"
"strings"
- "github.com/golang/lint"
+ "golang.org/x/lint"
)
var (
diff --git a/_tools/src/github.com/golang/lint/golint/import.go b/_tools/src/golang.org/x/lint/golint/import.go
similarity index 98%
rename from _tools/src/github.com/golang/lint/golint/import.go
rename to _tools/src/golang.org/x/lint/golint/import.go
index 02a0daa..2ba9dea 100644
--- a/_tools/src/github.com/golang/lint/golint/import.go
+++ b/_tools/src/golang.org/x/lint/golint/import.go
@@ -22,11 +22,10 @@ import (
"strings"
)
-var buildContext = build.Default
-
var (
- goroot = filepath.Clean(runtime.GOROOT())
- gorootSrc = filepath.Join(goroot, "src")
+ buildContext = build.Default
+ goroot = filepath.Clean(runtime.GOROOT())
+ gorootSrc = filepath.Join(goroot, "src")
)
// importPathsNoDotExpansion returns the import paths to use for the given
diff --git a/_tools/src/golang.org/x/lint/golint/importcomment.go b/_tools/src/golang.org/x/lint/golint/importcomment.go
new file mode 100644
index 0000000..d5b32f7
--- /dev/null
+++ b/_tools/src/golang.org/x/lint/golint/importcomment.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2018 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// +build go1.12
+
+// Require use of the correct import path only for Go 1.12+ users, so
+// any breakages coincide with people updating their CI configs or
+// whatnot.
+
+package main // import "golang.org/x/lint/golint"
diff --git a/_tools/src/github.com/golang/lint/lint.go b/_tools/src/golang.org/x/lint/lint.go
similarity index 92%
rename from _tools/src/github.com/golang/lint/lint.go
rename to _tools/src/golang.org/x/lint/lint.go
index a13129c..6b9fd6e 100644
--- a/_tools/src/github.com/golang/lint/lint.go
+++ b/_tools/src/golang.org/x/lint/lint.go
@@ -5,9 +5,10 @@
// https://developers.google.com/open-source/licenses/bsd.
// Package lint contains a linter for Go source code.
-package lint
+package lint // import "golang.org/x/lint"
import (
+ "bufio"
"bytes"
"fmt"
"go/ast"
@@ -22,6 +23,7 @@ import (
"unicode"
"unicode/utf8"
+ "golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/gcexportdata"
)
@@ -81,15 +83,15 @@ func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) {
// LintFiles lints a set of files of a single package.
// The argument is a map of filename to source.
func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) {
- if len(files) == 0 {
- return nil, nil
- }
pkg := &pkg{
fset: token.NewFileSet(),
files: make(map[string]*file),
}
var pkgName string
for filename, src := range files {
+ if isGenerated(src) {
+ continue // See issue #239
+ }
f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments)
if err != nil {
return nil, err
@@ -107,9 +109,30 @@ func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) {
filename: filename,
}
}
+ if len(pkg.files) == 0 {
+ return nil, nil
+ }
return pkg.lint(), nil
}
+var (
+ genHdr = []byte("// Code generated ")
+ genFtr = []byte(" DO NOT EDIT.")
+)
+
+// isGenerated reports whether the source file is generated code
+// according the rules from https://golang.org/s/generatedcode.
+func isGenerated(src []byte) bool {
+ sc := bufio.NewScanner(bytes.NewReader(src))
+ for sc.Scan() {
+ b := sc.Bytes()
+ if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) {
+ return true
+ }
+ }
+ return false
+}
+
// pkg represents a package being linted.
type pkg struct {
fset *token.FileSet
@@ -504,7 +527,10 @@ func (f *file) lintExported() {
})
}
-var allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`)
+var (
+ allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`)
+ anyCapsRE = regexp.MustCompile(`[A-Z]`)
+)
// knownNameExceptions is a set of names that are known to be exempt from naming checks.
// This is usually because they are constrained by having to match names in the
@@ -514,6 +540,18 @@ var knownNameExceptions = map[string]bool{
"kWh": true,
}
+func isInTopLevel(f *ast.File, ident *ast.Ident) bool {
+ path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End())
+ for _, f := range path {
+ switch f.(type) {
+ case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident:
+ continue
+ }
+ return false
+ }
+ return true
+}
+
// lintNames examines all names in the file.
// It complains if any use underscores or incorrect known initialisms.
func (f *file) lintNames() {
@@ -521,6 +559,9 @@ func (f *file) lintNames() {
if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") {
f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name")
}
+ if anyCapsRE.MatchString(f.f.Name.Name) {
+ f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name))
+ }
check := func(id *ast.Ident, thing string) {
if id.Name == "_" {
@@ -532,12 +573,22 @@ func (f *file) lintNames() {
// Handle two common styles from other languages that don't belong in Go.
if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") {
- f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase")
- return
+ capCount := 0
+ for _, c := range id.Name {
+ if 'A' <= c && c <= 'Z' {
+ capCount++
+ }
+ }
+ if capCount >= 2 {
+ f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase")
+ return
+ }
}
- if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' {
- should := string(id.Name[1]+'a'-'A') + id.Name[2:]
- f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should)
+ if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) {
+ if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' {
+ should := string(id.Name[1]+'a'-'A') + id.Name[2:]
+ f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should)
+ }
}
should := lintName(id.Name)
@@ -1016,13 +1067,13 @@ func (f *file) lintElses() {
if !ok || ifStmt.Else == nil {
return true
}
- if ignore[ifStmt] {
- return true
- }
if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok {
ignore[elseif] = true
return true
}
+ if ignore[ifStmt] {
+ return true
+ }
if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok {
// only care about elses without conditions
return true
@@ -1055,20 +1106,25 @@ func (f *file) lintRanges() {
if !ok {
return true
}
- if rs.Value == nil {
- // for x = range m { ... }
- return true // single var form
- }
- if !isIdent(rs.Value, "_") {
- // for ?, y = range m { ... }
+
+ if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) {
+ p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`")
+
+ newRS := *rs // shallow copy
+ newRS.Value = nil
+ newRS.Key = nil
+ p.ReplacementLine = f.firstLineOf(&newRS, rs)
+
return true
}
- p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok)
+ if isIdent(rs.Value, "_") {
+ p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok)
- newRS := *rs // shallow copy
- newRS.Value = nil
- p.ReplacementLine = f.firstLineOf(&newRS, rs)
+ newRS := *rs // shallow copy
+ newRS.Value = nil
+ p.ReplacementLine = f.firstLineOf(&newRS, rs)
+ }
return true
})
@@ -1092,6 +1148,9 @@ func (f *file) lintErrorf() {
if !isErrorsNew && !isTestingError {
return true
}
+ if !f.imports("errors") {
+ return true
+ }
arg := ce.Args[0]
ce, ok = arg.(*ast.CallExpr)
if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") {
@@ -1215,7 +1274,7 @@ func (f *file) lintReceiverNames() {
name := names[0].Name
const ref = styleGuideBase + "#receiver-names"
if name == "_" {
- f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore`)
+ f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`)
return true
}
if name == "this" || name == "self" {
@@ -1272,6 +1331,9 @@ func (f *file) lintErrorReturn() {
if len(ret) <= 1 {
return true
}
+ if isIdent(ret[len(ret)-1].Type, "error") {
+ return true
+ }
// An error return parameter should be the last parameter.
// Flag any error parameters found before the last.
for _, r := range ret[:len(ret)-1] {
@@ -1444,6 +1506,28 @@ func (f *file) lintContextArgs() {
})
}
+// containsComments returns whether the interval [start, end) contains any
+// comments without "// MATCH " prefix.
+func (f *file) containsComments(start, end token.Pos) bool {
+ for _, cgroup := range f.f.Comments {
+ comments := cgroup.List
+ if comments[0].Slash >= end {
+ // All comments starting with this group are after end pos.
+ return false
+ }
+ if comments[len(comments)-1].Slash < start {
+ // Comments group ends before start pos.
+ continue
+ }
+ for _, c := range comments {
+ if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") {
+ return true
+ }
+ }
+ }
+ return false
+}
+
// receiverType returns the named type of the method receiver, sans "*",
// or "invalid-type" if fn.Recv is ill formed.
func receiverType(fn *ast.FuncDecl) string {
@@ -1504,11 +1588,6 @@ func isPkgDot(expr ast.Expr, pkg, name string) bool {
return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name)
}
-func isZero(expr ast.Expr) bool {
- lit, ok := expr.(*ast.BasicLit)
- return ok && lit.Kind == token.INT && lit.Value == "0"
-}
-
func isOne(expr ast.Expr) bool {
lit, ok := expr.(*ast.BasicLit)
return ok && lit.Kind == token.INT && lit.Value == "1"
@@ -1586,6 +1665,20 @@ func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) {
return rx.FindStringSubmatch(line)
}
+// imports returns true if the current file imports the specified package path.
+func (f *file) imports(importPath string) bool {
+ all := astutil.Imports(f.fset, f.f)
+ for _, p := range all {
+ for _, i := range p {
+ uq, err := strconv.Unquote(i.Path.Value)
+ if err == nil && importPath == uq {
+ return true
+ }
+ }
+ }
+ return false
+}
+
// srcLine returns the complete line at p, including the terminating newline.
func srcLine(src []byte, p token.Position) string {
// Run to end of line in both directions if not at line start/end.
diff --git a/_tools/src/golang.org/x/tools/go/ast/astutil/imports.go b/_tools/src/golang.org/x/tools/go/ast/astutil/imports.go
index 83f196c..3e4b195 100644
--- a/_tools/src/golang.org/x/tools/go/ast/astutil/imports.go
+++ b/_tools/src/golang.org/x/tools/go/ast/astutil/imports.go
@@ -14,26 +14,26 @@ import (
)
// AddImport adds the import path to the file f, if absent.
-func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) {
- return AddNamedImport(fset, f, "", ipath)
+func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
+ return AddNamedImport(fset, f, "", path)
}
-// AddNamedImport adds the import path to the file f, if absent.
+// AddNamedImport adds the import with the given name and path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
// AddNamedImport(fset, f, "pathpkg", "path")
// adds
// import pathpkg "path"
-func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
- if imports(f, ipath) {
+func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
+ if imports(f, name, path) {
return false
}
newImport := &ast.ImportSpec{
Path: &ast.BasicLit{
Kind: token.STRING,
- Value: strconv.Quote(ipath),
+ Value: strconv.Quote(path),
},
}
if name != "" {
@@ -43,14 +43,14 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
// Find an import decl to add to.
// The goal is to find an existing import
// whose import path has the longest shared
- // prefix with ipath.
+ // prefix with path.
var (
bestMatch = -1 // length of longest shared prefix
lastImport = -1 // index in f.Decls of the file's final import decl
impDecl *ast.GenDecl // import decl containing the best match
impIndex = -1 // spec index in impDecl containing the best match
- isThirdPartyPath = isThirdParty(ipath)
+ isThirdPartyPath = isThirdParty(path)
)
for i, decl := range f.Decls {
gen, ok := decl.(*ast.GenDecl)
@@ -81,7 +81,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
for j, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
p := importPath(impspec)
- n := matchLen(p, ipath)
+ n := matchLen(p, path)
if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) {
bestMatch = n
impDecl = gen
@@ -101,8 +101,8 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
impDecl.TokPos = f.Decls[lastImport].End()
} else {
// There are no existing imports.
- // Our new import goes after the package declaration and after
- // the comment, if any, that starts on the same line as the
+ // Our new import, preceded by a blank line, goes after the package declaration
+ // and after the comment, if any, that starts on the same line as the
// package declaration.
impDecl.TokPos = f.Package
@@ -112,7 +112,8 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added
if file.Line(c.Pos()) > pkgLine {
break
}
- impDecl.TokPos = c.End()
+ // +2 for a blank line
+ impDecl.TokPos = c.End() + 2
}
}
f.Decls = append(f.Decls, nil)
@@ -196,11 +197,13 @@ func isThirdParty(importPath string) bool {
}
// DeleteImport deletes the import path from the file f, if present.
+// If there are duplicate import declarations, all matching ones are deleted.
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
return DeleteNamedImport(fset, f, "", path)
}
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
+// If there are duplicate import declarations, all matching ones are deleted.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
var delspecs []*ast.ImportSpec
var delcomments []*ast.CommentGroup
@@ -215,13 +218,7 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del
for j := 0; j < len(gen.Specs); j++ {
spec := gen.Specs[j]
impspec := spec.(*ast.ImportSpec)
- if impspec.Name == nil && name != "" {
- continue
- }
- if impspec.Name != nil && impspec.Name.Name != name {
- continue
- }
- if importPath(impspec) != path {
+ if importName(impspec) != name || importPath(impspec) != path {
continue
}
@@ -382,9 +379,14 @@ func (fn visitFn) Visit(node ast.Node) ast.Visitor {
return fn
}
-// imports returns true if f imports path.
-func imports(f *ast.File, path string) bool {
- return importSpec(f, path) != nil
+// imports reports whether f has an import with the specified name and path.
+func imports(f *ast.File, name, path string) bool {
+ for _, s := range f.Imports {
+ if importName(s) == name && importPath(s) == path {
+ return true
+ }
+ }
+ return false
}
// importSpec returns the import spec if f imports path,
@@ -398,14 +400,23 @@ func importSpec(f *ast.File, path string) *ast.ImportSpec {
return nil
}
+// importName returns the name of s,
+// or "" if the import is not named.
+func importName(s *ast.ImportSpec) string {
+ if s.Name == nil {
+ return ""
+ }
+ return s.Name.Name
+}
+
// importPath returns the unquoted import path of s,
// or "" if the path is not properly quoted.
func importPath(s *ast.ImportSpec) string {
t, err := strconv.Unquote(s.Path.Value)
- if err == nil {
- return t
+ if err != nil {
+ return ""
}
- return ""
+ return t
}
// declImports reports whether gen contains an import of path.
diff --git a/_tools/src/golang.org/x/tools/go/ast/astutil/rewrite.go b/_tools/src/golang.org/x/tools/go/ast/astutil/rewrite.go
new file mode 100644
index 0000000..cf72ea9
--- /dev/null
+++ b/_tools/src/golang.org/x/tools/go/ast/astutil/rewrite.go
@@ -0,0 +1,477 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package astutil
+
+import (
+ "fmt"
+ "go/ast"
+ "reflect"
+ "sort"
+)
+
+// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
+// before and/or after the node's children, using a Cursor describing
+// the current node and providing operations on it.
+//
+// The return value of ApplyFunc controls the syntax tree traversal.
+// See Apply for details.
+type ApplyFunc func(*Cursor) bool
+
+// Apply traverses a syntax tree recursively, starting with root,
+// and calling pre and post for each node as described below.
+// Apply returns the syntax tree, possibly modified.
+//
+// If pre is not nil, it is called for each node before the node's
+// children are traversed (pre-order). If pre returns false, no
+// children are traversed, and post is not called for that node.
+//
+// If post is not nil, and a prior call of pre didn't return false,
+// post is called for each node after its children are traversed
+// (post-order). If post returns false, traversal is terminated and
+// Apply returns immediately.
+//
+// Only fields that refer to AST nodes are considered children;
+// i.e., token.Pos, Scopes, Objects, and fields of basic types
+// (strings, etc.) are ignored.
+//
+// Children are traversed in the order in which they appear in the
+// respective node's struct definition. A package's files are
+// traversed in the filenames' alphabetical order.
+//
+func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
+ parent := &struct{ ast.Node }{root}
+ defer func() {
+ if r := recover(); r != nil && r != abort {
+ panic(r)
+ }
+ result = parent.Node
+ }()
+ a := &application{pre: pre, post: post}
+ a.apply(parent, "Node", nil, root)
+ return
+}
+
+var abort = new(int) // singleton, to signal termination of Apply
+
+// A Cursor describes a node encountered during Apply.
+// Information about the node and its parent is available
+// from the Node, Parent, Name, and Index methods.
+//
+// If p is a variable of type and value of the current parent node
+// c.Parent(), and f is the field identifier with name c.Name(),
+// the following invariants hold:
+//
+// p.f == c.Node() if c.Index() < 0
+// p.f[c.Index()] == c.Node() if c.Index() >= 0
+//
+// The methods Replace, Delete, InsertBefore, and InsertAfter
+// can be used to change the AST without disrupting Apply.
+type Cursor struct {
+ parent ast.Node
+ name string
+ iter *iterator // valid if non-nil
+ node ast.Node
+}
+
+// Node returns the current Node.
+func (c *Cursor) Node() ast.Node { return c.node }
+
+// Parent returns the parent of the current Node.
+func (c *Cursor) Parent() ast.Node { return c.parent }
+
+// Name returns the name of the parent Node field that contains the current Node.
+// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns
+// the filename for the current Node.
+func (c *Cursor) Name() string { return c.name }
+
+// Index reports the index >= 0 of the current Node in the slice of Nodes that
+// contains it, or a value < 0 if the current Node is not part of a slice.
+// The index of the current node changes if InsertBefore is called while
+// processing the current node.
+func (c *Cursor) Index() int {
+ if c.iter != nil {
+ return c.iter.index
+ }
+ return -1
+}
+
+// field returns the current node's parent field value.
+func (c *Cursor) field() reflect.Value {
+ return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name)
+}
+
+// Replace replaces the current Node with n.
+// The replacement node is not walked by Apply.
+func (c *Cursor) Replace(n ast.Node) {
+ if _, ok := c.node.(*ast.File); ok {
+ file, ok := n.(*ast.File)
+ if !ok {
+ panic("attempt to replace *ast.File with non-*ast.File")
+ }
+ c.parent.(*ast.Package).Files[c.name] = file
+ return
+ }
+
+ v := c.field()
+ if i := c.Index(); i >= 0 {
+ v = v.Index(i)
+ }
+ v.Set(reflect.ValueOf(n))
+}
+
+// Delete deletes the current Node from its containing slice.
+// If the current Node is not part of a slice, Delete panics.
+// As a special case, if the current node is a package file,
+// Delete removes it from the package's Files map.
+func (c *Cursor) Delete() {
+ if _, ok := c.node.(*ast.File); ok {
+ delete(c.parent.(*ast.Package).Files, c.name)
+ return
+ }
+
+ i := c.Index()
+ if i < 0 {
+ panic("Delete node not contained in slice")
+ }
+ v := c.field()
+ l := v.Len()
+ reflect.Copy(v.Slice(i, l), v.Slice(i+1, l))
+ v.Index(l - 1).Set(reflect.Zero(v.Type().Elem()))
+ v.SetLen(l - 1)
+ c.iter.step--
+}
+
+// InsertAfter inserts n after the current Node in its containing slice.
+// If the current Node is not part of a slice, InsertAfter panics.
+// Apply does not walk n.
+func (c *Cursor) InsertAfter(n ast.Node) {
+ i := c.Index()
+ if i < 0 {
+ panic("InsertAfter node not contained in slice")
+ }
+ v := c.field()
+ v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
+ l := v.Len()
+ reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l))
+ v.Index(i + 1).Set(reflect.ValueOf(n))
+ c.iter.step++
+}
+
+// InsertBefore inserts n before the current Node in its containing slice.
+// If the current Node is not part of a slice, InsertBefore panics.
+// Apply will not walk n.
+func (c *Cursor) InsertBefore(n ast.Node) {
+ i := c.Index()
+ if i < 0 {
+ panic("InsertBefore node not contained in slice")
+ }
+ v := c.field()
+ v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
+ l := v.Len()
+ reflect.Copy(v.Slice(i+1, l), v.Slice(i, l))
+ v.Index(i).Set(reflect.ValueOf(n))
+ c.iter.index++
+}
+
+// application carries all the shared data so we can pass it around cheaply.
+type application struct {
+ pre, post ApplyFunc
+ cursor Cursor
+ iter iterator
+}
+
+func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
+ // convert typed nil into untyped nil
+ if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
+ n = nil
+ }
+
+ // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead
+ saved := a.cursor
+ a.cursor.parent = parent
+ a.cursor.name = name
+ a.cursor.iter = iter
+ a.cursor.node = n
+
+ if a.pre != nil && !a.pre(&a.cursor) {
+ a.cursor = saved
+ return
+ }
+
+ // walk children
+ // (the order of the cases matches the order of the corresponding node types in go/ast)
+ switch n := n.(type) {
+ case nil:
+ // nothing to do
+
+ // Comments and fields
+ case *ast.Comment:
+ // nothing to do
+
+ case *ast.CommentGroup:
+ if n != nil {
+ a.applyList(n, "List")
+ }
+
+ case *ast.Field:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.applyList(n, "Names")
+ a.apply(n, "Type", nil, n.Type)
+ a.apply(n, "Tag", nil, n.Tag)
+ a.apply(n, "Comment", nil, n.Comment)
+
+ case *ast.FieldList:
+ a.applyList(n, "List")
+
+ // Expressions
+ case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+ // nothing to do
+
+ case *ast.Ellipsis:
+ a.apply(n, "Elt", nil, n.Elt)
+
+ case *ast.FuncLit:
+ a.apply(n, "Type", nil, n.Type)
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.CompositeLit:
+ a.apply(n, "Type", nil, n.Type)
+ a.applyList(n, "Elts")
+
+ case *ast.ParenExpr:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.SelectorExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Sel", nil, n.Sel)
+
+ case *ast.IndexExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Index", nil, n.Index)
+
+ case *ast.SliceExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Low", nil, n.Low)
+ a.apply(n, "High", nil, n.High)
+ a.apply(n, "Max", nil, n.Max)
+
+ case *ast.TypeAssertExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Type", nil, n.Type)
+
+ case *ast.CallExpr:
+ a.apply(n, "Fun", nil, n.Fun)
+ a.applyList(n, "Args")
+
+ case *ast.StarExpr:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.UnaryExpr:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.BinaryExpr:
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Y", nil, n.Y)
+
+ case *ast.KeyValueExpr:
+ a.apply(n, "Key", nil, n.Key)
+ a.apply(n, "Value", nil, n.Value)
+
+ // Types
+ case *ast.ArrayType:
+ a.apply(n, "Len", nil, n.Len)
+ a.apply(n, "Elt", nil, n.Elt)
+
+ case *ast.StructType:
+ a.apply(n, "Fields", nil, n.Fields)
+
+ case *ast.FuncType:
+ a.apply(n, "Params", nil, n.Params)
+ a.apply(n, "Results", nil, n.Results)
+
+ case *ast.InterfaceType:
+ a.apply(n, "Methods", nil, n.Methods)
+
+ case *ast.MapType:
+ a.apply(n, "Key", nil, n.Key)
+ a.apply(n, "Value", nil, n.Value)
+
+ case *ast.ChanType:
+ a.apply(n, "Value", nil, n.Value)
+
+ // Statements
+ case *ast.BadStmt:
+ // nothing to do
+
+ case *ast.DeclStmt:
+ a.apply(n, "Decl", nil, n.Decl)
+
+ case *ast.EmptyStmt:
+ // nothing to do
+
+ case *ast.LabeledStmt:
+ a.apply(n, "Label", nil, n.Label)
+ a.apply(n, "Stmt", nil, n.Stmt)
+
+ case *ast.ExprStmt:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.SendStmt:
+ a.apply(n, "Chan", nil, n.Chan)
+ a.apply(n, "Value", nil, n.Value)
+
+ case *ast.IncDecStmt:
+ a.apply(n, "X", nil, n.X)
+
+ case *ast.AssignStmt:
+ a.applyList(n, "Lhs")
+ a.applyList(n, "Rhs")
+
+ case *ast.GoStmt:
+ a.apply(n, "Call", nil, n.Call)
+
+ case *ast.DeferStmt:
+ a.apply(n, "Call", nil, n.Call)
+
+ case *ast.ReturnStmt:
+ a.applyList(n, "Results")
+
+ case *ast.BranchStmt:
+ a.apply(n, "Label", nil, n.Label)
+
+ case *ast.BlockStmt:
+ a.applyList(n, "List")
+
+ case *ast.IfStmt:
+ a.apply(n, "Init", nil, n.Init)
+ a.apply(n, "Cond", nil, n.Cond)
+ a.apply(n, "Body", nil, n.Body)
+ a.apply(n, "Else", nil, n.Else)
+
+ case *ast.CaseClause:
+ a.applyList(n, "List")
+ a.applyList(n, "Body")
+
+ case *ast.SwitchStmt:
+ a.apply(n, "Init", nil, n.Init)
+ a.apply(n, "Tag", nil, n.Tag)
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.TypeSwitchStmt:
+ a.apply(n, "Init", nil, n.Init)
+ a.apply(n, "Assign", nil, n.Assign)
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.CommClause:
+ a.apply(n, "Comm", nil, n.Comm)
+ a.applyList(n, "Body")
+
+ case *ast.SelectStmt:
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.ForStmt:
+ a.apply(n, "Init", nil, n.Init)
+ a.apply(n, "Cond", nil, n.Cond)
+ a.apply(n, "Post", nil, n.Post)
+ a.apply(n, "Body", nil, n.Body)
+
+ case *ast.RangeStmt:
+ a.apply(n, "Key", nil, n.Key)
+ a.apply(n, "Value", nil, n.Value)
+ a.apply(n, "X", nil, n.X)
+ a.apply(n, "Body", nil, n.Body)
+
+ // Declarations
+ case *ast.ImportSpec:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.apply(n, "Name", nil, n.Name)
+ a.apply(n, "Path", nil, n.Path)
+ a.apply(n, "Comment", nil, n.Comment)
+
+ case *ast.ValueSpec:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.applyList(n, "Names")
+ a.apply(n, "Type", nil, n.Type)
+ a.applyList(n, "Values")
+ a.apply(n, "Comment", nil, n.Comment)
+
+ case *ast.TypeSpec:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.apply(n, "Name", nil, n.Name)
+ a.apply(n, "Type", nil, n.Type)
+ a.apply(n, "Comment", nil, n.Comment)
+
+ case *ast.BadDecl:
+ // nothing to do
+
+ case *ast.GenDecl:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.applyList(n, "Specs")
+
+ case *ast.FuncDecl:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.apply(n, "Recv", nil, n.Recv)
+ a.apply(n, "Name", nil, n.Name)
+ a.apply(n, "Type", nil, n.Type)
+ a.apply(n, "Body", nil, n.Body)
+
+ // Files and packages
+ case *ast.File:
+ a.apply(n, "Doc", nil, n.Doc)
+ a.apply(n, "Name", nil, n.Name)
+ a.applyList(n, "Decls")
+ // Don't walk n.Comments; they have either been walked already if
+ // they are Doc comments, or they can be easily walked explicitly.
+
+ case *ast.Package:
+ // collect and sort names for reproducible behavior
+ var names []string
+ for name := range n.Files {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ a.apply(n, name, nil, n.Files[name])
+ }
+
+ default:
+ panic(fmt.Sprintf("Apply: unexpected node type %T", n))
+ }
+
+ if a.post != nil && !a.post(&a.cursor) {
+ panic(abort)
+ }
+
+ a.cursor = saved
+}
+
+// An iterator controls iteration over a slice of nodes.
+type iterator struct {
+ index, step int
+}
+
+func (a *application) applyList(parent ast.Node, name string) {
+ // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead
+ saved := a.iter
+ a.iter.index = 0
+ for {
+ // must reload parent.name each time, since cursor modifications might change it
+ v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name)
+ if a.iter.index >= v.Len() {
+ break
+ }
+
+ // element x may be nil in a bad AST - be cautious
+ var x ast.Node
+ if e := v.Index(a.iter.index); e.IsValid() {
+ x = e.Interface().(ast.Node)
+ }
+
+ a.iter.step = 1
+ a.apply(parent, name, &a.iter, x)
+ a.iter.index += a.iter.step
+ }
+ a.iter = saved
+}
diff --git a/_tools/src/golang.org/x/tools/go/buildutil/fakecontext.go b/_tools/src/golang.org/x/tools/go/buildutil/fakecontext.go
index 24cbcbe..8b7f066 100644
--- a/_tools/src/golang.org/x/tools/go/buildutil/fakecontext.go
+++ b/_tools/src/golang.org/x/tools/go/buildutil/fakecontext.go
@@ -41,6 +41,7 @@ func FakeContext(pkgs map[string]map[string]string) *build.Context {
ctxt := build.Default // copy
ctxt.GOROOT = "/go"
ctxt.GOPATH = ""
+ ctxt.Compiler = "gc"
ctxt.IsDir = func(dir string) bool {
dir = clean(dir)
if dir == "" {
diff --git a/_tools/src/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/_tools/src/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index e53270e..98b3987 100644
--- a/_tools/src/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/_tools/src/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -7,9 +7,6 @@
// gc compiler. This package supports go1.7 export data format and all
// later versions.
//
-// This package replaces the deprecated golang.org/x/tools/go/gcimporter15
-// package, which will be deleted in October 2017.
-//
// Although it might seem convenient for this package to live alongside
// go/types in the standard library, this would cause version skew
// problems for developer tools that use it, since they must be able to
@@ -19,7 +16,7 @@
// time before the Go 1.8 release and rebuild and redeploy their
// developer tools, which will then be able to consume both Go 1.7 and
// Go 1.8 export data files, so they will work before and after the
-// Go update. (See discussion at https://github.com/golang/go/issues/15651.)
+// Go update. (See discussion at https://golang.org/issue/15651.)
//
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
@@ -32,7 +29,7 @@ import (
"io"
"io/ioutil"
- gcimporter "golang.org/x/tools/go/gcimporter15"
+ "golang.org/x/tools/go/internal/gcimporter"
)
// Find returns the name of an object (.o) or archive (.a) file
@@ -88,6 +85,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
}
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 && data[0] == 'i' {
+ _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
+ return pkg, err
+ }
+
_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
return pkg, err
}
@@ -95,6 +100,10 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
// Write writes encoded type information for the specified package to out.
// The FileSet provides file position information for named objects.
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
- _, err := out.Write(gcimporter.BExportData(fset, pkg))
+ b, err := gcimporter.BExportData(fset, pkg)
+ if err != nil {
+ return err
+ }
+ _, err = out.Write(b)
return err
}
diff --git a/_tools/src/golang.org/x/tools/go/gcexportdata/main.go b/_tools/src/golang.org/x/tools/go/gcexportdata/main.go
index 106046c..2713dce 100644
--- a/_tools/src/golang.org/x/tools/go/gcexportdata/main.go
+++ b/_tools/src/golang.org/x/tools/go/gcexportdata/main.go
@@ -20,11 +20,13 @@ import (
"golang.org/x/tools/go/types/typeutil"
)
+var packageFlag = flag.String("package", "", "alternative package to print")
+
func main() {
log.SetPrefix("gcexportdata: ")
log.SetFlags(0)
flag.Usage = func() {
- fmt.Fprintln(os.Stderr, "usage: gcexportdata file.a")
+ fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a")
}
flag.Parse()
if flag.NArg() != 1 {
@@ -44,11 +46,27 @@ func main() {
}
// Decode the package.
+ const primary = ""
imports := make(map[string]*types.Package)
fset := token.NewFileSet()
- pkg, err := gcexportdata.Read(r, fset, imports, "dummy")
+ pkg, err := gcexportdata.Read(r, fset, imports, primary)
if err != nil {
- log.Fatal("%s: %s", filename, err)
+ log.Fatalf("%s: %s", filename, err)
+ }
+
+ // Optionally select an indirectly mentioned package.
+ if *packageFlag != "" {
+ pkg = imports[*packageFlag]
+ if pkg == nil {
+ fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n",
+ filename, *packageFlag)
+ for p := range imports {
+ if p != primary {
+ fmt.Fprintf(os.Stderr, "\t%s\n", p)
+ }
+ }
+ os.Exit(1)
+ }
}
// Print all package-level declarations, including non-exported ones.
diff --git a/_tools/src/golang.org/x/tools/go/gcimporter15/isAlias18.go b/_tools/src/golang.org/x/tools/go/gcimporter15/isAlias18.go
deleted file mode 100644
index 225ffee..0000000
--- a/_tools/src/golang.org/x/tools/go/gcimporter15/isAlias18.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.9
-
-package gcimporter
-
-import "go/types"
-
-func isAlias(obj *types.TypeName) bool {
- return false // there are no type aliases before Go 1.9
-}
diff --git a/_tools/src/golang.org/x/tools/go/gcimporter15/isAlias19.go b/_tools/src/golang.org/x/tools/go/gcimporter15/isAlias19.go
deleted file mode 100644
index c2025d8..0000000
--- a/_tools/src/golang.org/x/tools/go/gcimporter15/isAlias19.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.9
-
-package gcimporter
-
-import "go/types"
-
-func isAlias(obj *types.TypeName) bool {
- return obj.IsAlias()
-}
diff --git a/_tools/src/golang.org/x/tools/go/loader/cgo.go b/_tools/src/golang.org/x/tools/go/internal/cgo/cgo.go
similarity index 85%
rename from _tools/src/golang.org/x/tools/go/loader/cgo.go
rename to _tools/src/golang.org/x/tools/go/internal/cgo/cgo.go
index 72c6f50..0f652ea 100644
--- a/_tools/src/golang.org/x/tools/go/loader/cgo.go
+++ b/_tools/src/golang.org/x/tools/go/internal/cgo/cgo.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package loader
+package cgo
// This file handles cgo preprocessing of files containing `import "C"`.
//
@@ -66,10 +66,10 @@ import (
"strings"
)
-// processCgoFiles invokes the cgo preprocessor on bp.CgoFiles, parses
+// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
// the output and returns the resulting ASTs.
//
-func processCgoFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
+func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
if err != nil {
return nil, err
@@ -81,7 +81,7 @@ func processCgoFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(pa
pkgdir = DisplayPath(pkgdir)
}
- cgoFiles, cgoDisplayFiles, err := runCgo(bp, pkgdir, tmpdir)
+ cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false)
if err != nil {
return nil, err
}
@@ -104,15 +104,20 @@ func processCgoFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(pa
var cgoRe = regexp.MustCompile(`[/\\:]`)
-// runCgo invokes the cgo preprocessor on bp.CgoFiles and returns two
+// Run invokes the cgo preprocessor on bp.CgoFiles and returns two
// lists of files: the resulting processed files (in temporary
// directory tmpdir) and the corresponding names of the unprocessed files.
//
-// runCgo is adapted from (*builder).cgo in
+// Run is adapted from (*builder).cgo in
// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
// Objective C, CGOPKGPATH, CGO_FLAGS.
//
-func runCgo(bp *build.Package, pkgdir, tmpdir string) (files, displayFiles []string, err error) {
+// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in
+// to the cgo preprocessor. This in turn will set the // line comments
+// referring to those files to use absolute paths. This is needed for
+// go/packages using the legacy go list support so it is able to find
+// the original files.
+func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) {
cgoCPPFLAGS, _, _, _ := cflags(bp, true)
_, cgoexeCFLAGS, _, _ := cflags(bp, false)
@@ -145,9 +150,17 @@ func runCgo(bp *build.Package, pkgdir, tmpdir string) (files, displayFiles []str
cgoflags = append(cgoflags, "-import_syscall=false")
}
+ var cgoFiles []string = bp.CgoFiles
+ if useabs {
+ cgoFiles = make([]string, len(bp.CgoFiles))
+ for i := range cgoFiles {
+ cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i])
+ }
+ }
+
args := stringList(
"go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
- cgoCPPFLAGS, cgoexeCFLAGS, bp.CgoFiles,
+ cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
)
if false {
log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
diff --git a/_tools/src/golang.org/x/tools/go/loader/cgo_pkgconfig.go b/_tools/src/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
similarity index 98%
rename from _tools/src/golang.org/x/tools/go/loader/cgo_pkgconfig.go
rename to _tools/src/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
index de57422..b5bb95a 100644
--- a/_tools/src/golang.org/x/tools/go/loader/cgo_pkgconfig.go
+++ b/_tools/src/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package loader
+package cgo
import (
"errors"
diff --git a/_tools/src/golang.org/x/tools/go/gcimporter15/bexport.go b/_tools/src/golang.org/x/tools/go/internal/gcimporter/bexport.go
similarity index 91%
rename from _tools/src/golang.org/x/tools/go/gcimporter15/bexport.go
rename to _tools/src/golang.org/x/tools/go/internal/gcimporter/bexport.go
index cbf8bc0..a807d0a 100644
--- a/_tools/src/golang.org/x/tools/go/gcimporter15/bexport.go
+++ b/_tools/src/golang.org/x/tools/go/internal/gcimporter/bexport.go
@@ -16,7 +16,6 @@ import (
"go/constant"
"go/token"
"go/types"
- "log"
"math"
"math/big"
"sort"
@@ -39,6 +38,11 @@ const debugFormat = false // default: false
const trace = false // default: false
// Current export format version. Increase with each format change.
+// Note: The latest binary (non-indexed) export format is at version 6.
+// This exporter is still at level 4, but it doesn't matter since
+// the binary importer can handle older versions just fine.
+// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
+// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
// 4: type name objects support type aliases, uses aliasTag
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
// 2: removed unused bool in ODCL export (compiler only)
@@ -76,9 +80,29 @@ type exporter struct {
indent int // for trace
}
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
+
// BExportData returns binary export data for pkg.
// If no file set is provided, position info will be missing.
-func BExportData(fset *token.FileSet, pkg *types.Package) []byte {
+func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+
p := exporter{
fset: fset,
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
@@ -103,11 +127,11 @@ func BExportData(fset *token.FileSet, pkg *types.Package) []byte {
// --- generic export data ---
// populate type map with predeclared "known" types
- for index, typ := range predeclared {
+ for index, typ := range predeclared() {
p.typIndex[typ] = index
}
- if len(p.typIndex) != len(predeclared) {
- log.Fatalf("gcimporter: duplicate entries in type map?")
+ if len(p.typIndex) != len(predeclared()) {
+ return nil, internalError("duplicate entries in type map?")
}
// write package data
@@ -145,12 +169,12 @@ func BExportData(fset *token.FileSet, pkg *types.Package) []byte {
// --- end of export data ---
- return p.out.Bytes()
+ return p.out.Bytes(), nil
}
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
if pkg == nil {
- log.Fatalf("gcimporter: unexpected nil pkg")
+ panic(internalError("unexpected nil pkg"))
}
// if we saw the package before, write its index (>= 0)
@@ -185,7 +209,7 @@ func (p *exporter) obj(obj types.Object) {
p.value(obj.Val())
case *types.TypeName:
- if isAlias(obj) {
+ if obj.IsAlias() {
p.tag(aliasTag)
p.pos(obj)
p.qualifiedName(obj)
@@ -209,7 +233,7 @@ func (p *exporter) obj(obj types.Object) {
p.paramList(sig.Results(), false)
default:
- log.Fatalf("gcimporter: unexpected object %v (%T)", obj, obj)
+ panic(internalErrorf("unexpected object %v (%T)", obj, obj))
}
}
@@ -273,7 +297,7 @@ func (p *exporter) qualifiedName(obj types.Object) {
func (p *exporter) typ(t types.Type) {
if t == nil {
- log.Fatalf("gcimporter: nil type")
+ panic(internalError("nil type"))
}
// Possible optimization: Anonymous pointer types *T where
@@ -356,7 +380,7 @@ func (p *exporter) typ(t types.Type) {
p.typ(t.Elem())
default:
- log.Fatalf("gcimporter: unexpected type %T: %s", t, t)
+ panic(internalErrorf("unexpected type %T: %s", t, t))
}
}
@@ -422,7 +446,7 @@ func (p *exporter) fieldList(t *types.Struct) {
func (p *exporter) field(f *types.Var) {
if !f.IsField() {
- log.Fatalf("gcimporter: field expected")
+ panic(internalError("field expected"))
}
p.pos(f)
@@ -452,7 +476,7 @@ func (p *exporter) iface(t *types.Interface) {
func (p *exporter) method(m *types.Func) {
sig := m.Type().(*types.Signature)
if sig.Recv() == nil {
- log.Fatalf("gcimporter: method expected")
+ panic(internalError("method expected"))
}
p.pos(m)
@@ -575,13 +599,13 @@ func (p *exporter) value(x constant.Value) {
p.tag(unknownTag)
default:
- log.Fatalf("gcimporter: unexpected value %v (%T)", x, x)
+ panic(internalErrorf("unexpected value %v (%T)", x, x))
}
}
func (p *exporter) float(x constant.Value) {
if x.Kind() != constant.Float {
- log.Fatalf("gcimporter: unexpected constant %v, want float", x)
+ panic(internalErrorf("unexpected constant %v, want float", x))
}
// extract sign (there is no -0)
sign := constant.Sign(x)
@@ -616,7 +640,7 @@ func (p *exporter) float(x constant.Value) {
m.SetMantExp(&m, int(m.MinPrec()))
mant, acc := m.Int(nil)
if acc != big.Exact {
- log.Fatalf("gcimporter: internal error")
+ panic(internalError("internal error"))
}
p.int(sign)
@@ -653,7 +677,7 @@ func (p *exporter) bool(b bool) bool {
func (p *exporter) index(marker byte, index int) {
if index < 0 {
- log.Fatalf("gcimporter: invalid index < 0")
+ panic(internalError("invalid index < 0"))
}
if debugFormat {
p.marker('t')
@@ -666,7 +690,7 @@ func (p *exporter) index(marker byte, index int) {
func (p *exporter) tag(tag int) {
if tag >= 0 {
- log.Fatalf("gcimporter: invalid tag >= 0")
+ panic(internalError("invalid tag >= 0"))
}
if debugFormat {
p.marker('t')
diff --git a/_tools/src/golang.org/x/tools/go/gcimporter15/bimport.go b/_tools/src/golang.org/x/tools/go/internal/gcimporter/bimport.go
similarity index 82%
rename from _tools/src/golang.org/x/tools/go/gcimporter15/bimport.go
rename to _tools/src/golang.org/x/tools/go/internal/gcimporter/bimport.go
index 1936a7f..e3c3107 100644
--- a/_tools/src/golang.org/x/tools/go/gcimporter15/bimport.go
+++ b/_tools/src/golang.org/x/tools/go/internal/gcimporter/bimport.go
@@ -39,8 +39,7 @@ type importer struct {
posInfoFormat bool
prevFile string
prevLine int
- fset *token.FileSet
- files map[string]*token.File
+ fake fakeFileSet
// debugging support
debugFormat bool
@@ -53,12 +52,16 @@ type importer struct {
// compromised, an error is returned.
func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
// catch panics and return them as errors
+ const currentVersion = 6
+ version := -1 // unknown version
defer func() {
if e := recover(); e != nil {
- // The package (filename) causing the problem is added to this
- // error by a wrapper in the caller (Import in gcimporter.go).
// Return a (possibly nil or incomplete) package unchanged (see #16088).
- err = fmt.Errorf("cannot import, possibly version skew (%v) - reinstall package", e)
+ if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ }
}
}()
@@ -66,11 +69,13 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []
imports: imports,
data: data,
importpath: path,
- version: -1, // unknown version
+ version: version,
strList: []string{""}, // empty string is mapped to 0
pathList: []string{""}, // empty string is mapped to 0
- fset: fset,
- files: make(map[string]*token.File),
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*token.File),
+ },
}
// read version info
@@ -89,7 +94,7 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []
p.posInfoFormat = p.int() != 0
versionstr = p.string()
if versionstr == "v1" {
- p.version = 0
+ version = 0
}
} else {
// Go1.8 extensible encoding
@@ -97,35 +102,36 @@ func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []
versionstr = p.rawStringln(b)
if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
- p.version = v
+ version = v
}
}
}
+ p.version = version
// read version specific flags - extend as necessary
switch p.version {
- // case 6:
+ // case currentVersion:
// ...
// fallthrough
- case 5, 4, 3, 2, 1:
+ case currentVersion, 5, 4, 3, 2, 1:
p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
p.trackAllTypes = p.int() != 0
p.posInfoFormat = p.int() != 0
case 0:
// Go1.7 encoding format - nothing to do here
default:
- errorf("unknown export format version %d (%q)", p.version, versionstr)
+ errorf("unknown bexport format version %d (%q)", p.version, versionstr)
}
// --- generic export data ---
// populate typList with predeclared "known" types
- p.typList = append(p.typList, predeclared...)
+ p.typList = append(p.typList, predeclared()...)
// read package data
pkg = p.pkg()
- // read objects of phase 1 only (see cmd/compiler/internal/gc/bexport.go)
+ // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go)
objcount := 0
for {
tag := p.tagOrIndex()
@@ -184,6 +190,9 @@ func (p *importer) pkg() *types.Package {
} else {
path = p.string()
}
+ if p.version >= 6 {
+ p.int() // package height; unused by go/types
+ }
// we should never see an empty package name
if name == "" {
@@ -259,7 +268,7 @@ func (p *importer) obj(tag int) {
case constTag:
pos := p.pos()
pkg, name := p.qualifiedName()
- typ := p.typ(nil)
+ typ := p.typ(nil, nil)
val := p.value()
p.declare(types.NewConst(pos, pkg, name, typ, val))
@@ -267,16 +276,16 @@ func (p *importer) obj(tag int) {
// TODO(gri) verify type alias hookup is correct
pos := p.pos()
pkg, name := p.qualifiedName()
- typ := p.typ(nil)
+ typ := p.typ(nil, nil)
p.declare(types.NewTypeName(pos, pkg, name, typ))
case typeTag:
- p.typ(nil)
+ p.typ(nil, nil)
case varTag:
pos := p.pos()
pkg, name := p.qualifiedName()
- typ := p.typ(nil)
+ typ := p.typ(nil, nil)
p.declare(types.NewVar(pos, pkg, name, typ))
case funcTag:
@@ -323,15 +332,23 @@ func (p *importer) pos() token.Pos {
p.prevFile = file
p.prevLine = line
- // Synthesize a token.Pos
+ return p.fake.pos(file, line)
+}
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*token.File
+}
+func (s *fakeFileSet) pos(file string, line int) token.Pos {
// Since we don't know the set of needed file positions, we
// reserve maxlines positions per file.
const maxlines = 64 * 1024
- f := p.files[file]
+ f := s.files[file]
if f == nil {
- f = p.fset.AddFile(file, -1, maxlines)
- p.files[file] = f
+ f = s.fset.AddFile(file, -1, maxlines)
+ s.files[file] = f
// Allocate the fake linebreak indices on first use.
// TODO(adonovan): opt: save ~512KB using a more complex scheme?
fakeLinesOnce.Do(func() {
@@ -381,7 +398,11 @@ func (t *dddSlice) String() string { return "..." + t.elem.String() }
// the package currently imported. The parent package is needed for
// exported struct fields and interface methods which don't contain
// explicit package information in the export data.
-func (p *importer) typ(parent *types.Package) types.Type {
+//
+// A non-nil tname is used as the "owner" of the result type; i.e.,
+// the result type is the underlying type of tname. tname is used
+// to give interface methods a named receiver type where possible.
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type {
// if the type was seen before, i is its index (>= 0)
i := p.tagOrIndex()
if i >= 0 {
@@ -411,15 +432,15 @@ func (p *importer) typ(parent *types.Package) types.Type {
t0 := types.NewNamed(obj.(*types.TypeName), nil, nil)
// but record the existing type, if any
- t := obj.Type().(*types.Named)
- p.record(t)
+ tname := obj.Type().(*types.Named) // tname is either t0 or the existing type
+ p.record(tname)
// read underlying type
- t0.SetUnderlying(p.typ(parent))
+ t0.SetUnderlying(p.typ(parent, t0))
// interfaces don't have associated methods
if types.IsInterface(t0) {
- return t
+ return tname
}
// read associated methods
@@ -440,7 +461,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
t0.AddMethod(types.NewFunc(pos, parent, name, sig))
}
- return t
+ return tname
case arrayTag:
t := new(types.Array)
@@ -449,7 +470,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
}
n := p.int64()
- *t = *types.NewArray(p.typ(parent), n)
+ *t = *types.NewArray(p.typ(parent, nil), n)
return t
case sliceTag:
@@ -458,7 +479,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
p.record(t)
}
- *t = *types.NewSlice(p.typ(parent))
+ *t = *types.NewSlice(p.typ(parent, nil))
return t
case dddTag:
@@ -467,7 +488,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
p.record(t)
}
- t.elem = p.typ(parent)
+ t.elem = p.typ(parent, nil)
return t
case structTag:
@@ -485,7 +506,7 @@ func (p *importer) typ(parent *types.Package) types.Type {
p.record(t)
}
- *t = *types.NewPointer(p.typ(parent))
+ *t = *types.NewPointer(p.typ(parent, nil))
return t
case signatureTag:
@@ -504,18 +525,20 @@ func (p *importer) typ(parent *types.Package) types.Type {
// cannot expect the interface type to appear in a cycle, as any
// such cycle must contain a named type which would have been
// first defined earlier.
+ // TODO(gri) Is this still true now that we have type aliases?
+ // See issue #23225.
n := len(p.typList)
if p.trackAllTypes {
p.record(nil)
}
- var embeddeds []*types.Named
+ var embeddeds []types.Type
for n := p.int(); n > 0; n-- {
p.pos()
- embeddeds = append(embeddeds, p.typ(parent).(*types.Named))
+ embeddeds = append(embeddeds, p.typ(parent, nil))
}
- t := types.NewInterface(p.methodList(parent), embeddeds)
+ t := newInterface(p.methodList(parent, tname), embeddeds)
p.interfaceList = append(p.interfaceList, t)
if p.trackAllTypes {
p.typList[n] = t
@@ -528,8 +551,8 @@ func (p *importer) typ(parent *types.Package) types.Type {
p.record(t)
}
- key := p.typ(parent)
- val := p.typ(parent)
+ key := p.typ(parent, nil)
+ val := p.typ(parent, nil)
*t = *types.NewMap(key, val)
return t
@@ -539,19 +562,8 @@ func (p *importer) typ(parent *types.Package) types.Type {
p.record(t)
}
- var dir types.ChanDir
- // tag values must match the constants in cmd/compile/internal/gc/go.go
- switch d := p.int(); d {
- case 1 /* Crecv */ :
- dir = types.RecvOnly
- case 2 /* Csend */ :
- dir = types.SendOnly
- case 3 /* Cboth */ :
- dir = types.SendRecv
- default:
- errorf("unexpected channel dir %d", d)
- }
- val := p.typ(parent)
+ dir := chanDir(p.int())
+ val := p.typ(parent, nil)
*t = *types.NewChan(dir, val)
return t
@@ -561,6 +573,21 @@ func (p *importer) typ(parent *types.Package) types.Type {
}
}
+func chanDir(d int) types.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types.RecvOnly
+ case 2 /* Csend */ :
+ return types.SendOnly
+ case 3 /* Cboth */ :
+ return types.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
+
func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) {
if n := p.int(); n > 0 {
fields = make([]*types.Var, n)
@@ -575,7 +602,7 @@ func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags [
func (p *importer) field(parent *types.Package) (*types.Var, string) {
pos := p.pos()
pkg, name, alias := p.fieldName(parent)
- typ := p.typ(parent)
+ typ := p.typ(parent, nil)
tag := p.string()
anonymous := false
@@ -599,22 +626,30 @@ func (p *importer) field(parent *types.Package) (*types.Var, string) {
return types.NewField(pos, pkg, name, typ, anonymous), tag
}
-func (p *importer) methodList(parent *types.Package) (methods []*types.Func) {
+func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) {
if n := p.int(); n > 0 {
methods = make([]*types.Func, n)
for i := range methods {
- methods[i] = p.method(parent)
+ methods[i] = p.method(parent, baseType)
}
}
return
}
-func (p *importer) method(parent *types.Package) *types.Func {
+func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func {
pos := p.pos()
pkg, name, _ := p.fieldName(parent)
+ // If we don't have a baseType, use a nil receiver.
+ // A receiver using the actual interface type (which
+ // we don't know yet) will be filled in when we call
+ // types.Interface.Complete.
+ var recv *types.Var
+ if baseType != nil {
+ recv = types.NewVar(token.NoPos, parent, "", baseType)
+ }
params, isddd := p.paramList()
result, _ := p.paramList()
- sig := types.NewSignature(nil, params, result, isddd)
+ sig := types.NewSignature(recv, params, result, isddd)
return types.NewFunc(pos, pkg, name, sig)
}
@@ -670,7 +705,7 @@ func (p *importer) paramList() (*types.Tuple, bool) {
}
func (p *importer) param(named bool) (*types.Var, bool) {
- t := p.typ(nil)
+ t := p.typ(nil, nil)
td, isddd := t.(*dddSlice)
if isddd {
t = types.NewSlice(td.elem)
@@ -941,50 +976,58 @@ const (
aliasTag
)
-var predeclared = []types.Type{
- // basic types
- types.Typ[types.Bool],
- types.Typ[types.Int],
- types.Typ[types.Int8],
- types.Typ[types.Int16],
- types.Typ[types.Int32],
- types.Typ[types.Int64],
- types.Typ[types.Uint],
- types.Typ[types.Uint8],
- types.Typ[types.Uint16],
- types.Typ[types.Uint32],
- types.Typ[types.Uint64],
- types.Typ[types.Uintptr],
- types.Typ[types.Float32],
- types.Typ[types.Float64],
- types.Typ[types.Complex64],
- types.Typ[types.Complex128],
- types.Typ[types.String],
-
- // basic type aliases
- types.Universe.Lookup("byte").Type(),
- types.Universe.Lookup("rune").Type(),
-
- // error
- types.Universe.Lookup("error").Type(),
-
- // untyped types
- types.Typ[types.UntypedBool],
- types.Typ[types.UntypedInt],
- types.Typ[types.UntypedRune],
- types.Typ[types.UntypedFloat],
- types.Typ[types.UntypedComplex],
- types.Typ[types.UntypedString],
- types.Typ[types.UntypedNil],
-
- // package unsafe
- types.Typ[types.UnsafePointer],
-
- // invalid type
- types.Typ[types.Invalid], // only appears in packages with errors
-
- // used internally by gc; never used by this package or in .a files
- anyType{},
+var predecl []types.Type // initialized lazily
+
+func predeclared() []types.Type {
+ if predecl == nil {
+ // initialize lazily to be sure that all
+ // elements have been initialized before
+ predecl = []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+ }
+ }
+ return predecl
}
type anyType struct{}
diff --git a/_tools/src/golang.org/x/tools/go/gcimporter15/exportdata.go b/_tools/src/golang.org/x/tools/go/internal/gcimporter/exportdata.go
similarity index 100%
rename from _tools/src/golang.org/x/tools/go/gcimporter15/exportdata.go
rename to _tools/src/golang.org/x/tools/go/internal/gcimporter/exportdata.go
diff --git a/_tools/src/golang.org/x/tools/go/gcimporter15/gcimporter.go b/_tools/src/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
similarity index 90%
rename from _tools/src/golang.org/x/tools/go/gcimporter15/gcimporter.go
rename to _tools/src/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
index 6fbc9d7..9cf1866 100644
--- a/_tools/src/golang.org/x/tools/go/gcimporter15/gcimporter.go
+++ b/_tools/src/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
@@ -2,25 +2,21 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
+// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
// but it also contains the original source-based importer code for Go1.6.
// Once we stop supporting 1.6, we can remove that code.
-// Package gcimporter15 provides various functions for reading
+// Package gcimporter provides various functions for reading
// gc-generated object files that can be used to implement the
// Importer interface defined by the Go 1.5 standard library package.
-//
-// Deprecated: this package will be deleted in October 2017.
-// New code should use golang.org/x/tools/go/gcexportdata.
-//
-package gcimporter // import "golang.org/x/tools/go/gcimporter15"
+package gcimporter // import "golang.org/x/tools/go/internal/gcimporter"
import (
"bufio"
"errors"
"fmt"
"go/build"
- exact "go/constant"
+ "go/constant"
"go/token"
"go/types"
"io"
@@ -59,6 +55,7 @@ func FindPkg(path, srcDir string) (filename, id string) {
}
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
+ id = path // make sure we have an id to print in error message
return
}
noext = strings.TrimSuffix(bp.PkgObj, ".a")
@@ -131,51 +128,91 @@ func ImportData(packages map[string]*types.Package, filename, id string, data io
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
//
-func Import(packages map[string]*types.Package, path, srcDir string) (pkg *types.Package, err error) {
- filename, id := FindPkg(path, srcDir)
- if filename == "" {
+func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+ var rc io.ReadCloser
+ var filename, id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
if path == "unsafe" {
return types.Unsafe, nil
}
- err = fmt.Errorf("can't find import: %s", id)
- return
- }
+ id = path
- // no need to re-import if the package was imported completely before
- if pkg = packages[id]; pkg != nil && pkg.Complete() {
- return
- }
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ filename, id = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %q", id)
+ }
- // open file
- f, err := os.Open(filename)
- if err != nil {
- return
- }
- defer func() {
- f.Close()
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
if err != nil {
- // add file name to error
- err = fmt.Errorf("reading export data: %s: %v", filename, err)
+ return nil, err
}
- }()
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
var hdr string
- buf := bufio.NewReader(f)
+ buf := bufio.NewReader(rc)
if hdr, err = FindExportData(buf); err != nil {
return
}
switch hdr {
case "$$\n":
+ // Work-around if we don't have a filename; happens only if lookup != nil.
+ // Either way, the filename is only needed for importer error messages, so
+ // this is fine.
+ if filename == "" {
+ filename = path
+ }
return ImportData(packages, filename, id, buf)
+
case "$$B\n":
var data []byte
data, err = ioutil.ReadAll(buf)
- if err == nil {
- fset := token.NewFileSet()
+ if err != nil {
+ break
+ }
+
+ // TODO(gri): allow clients of go/importer to provide a FileSet.
+ // Or, define a new standard go/types/gcexportdata package.
+ fset := token.NewFileSet()
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 && data[0] == 'i' {
+ _, pkg, err = IImportData(fset, packages, data[1:], id)
+ } else {
_, pkg, err = BImportData(fset, packages, data, id)
- return
}
+
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
}
@@ -767,9 +804,9 @@ func (p *parser) parseInt() string {
// number = int_lit [ "p" int_lit ] .
//
-func (p *parser) parseNumber() (typ *types.Basic, val exact.Value) {
+func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) {
// mantissa
- mant := exact.MakeFromLiteral(p.parseInt(), token.INT, 0)
+ mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0)
if mant == nil {
panic("invalid mantissa")
}
@@ -782,14 +819,14 @@ func (p *parser) parseNumber() (typ *types.Basic, val exact.Value) {
p.error(err)
}
if exp < 0 {
- denom := exact.MakeInt64(1)
- denom = exact.Shift(denom, token.SHL, uint(-exp))
+ denom := constant.MakeInt64(1)
+ denom = constant.Shift(denom, token.SHL, uint(-exp))
typ = types.Typ[types.UntypedFloat]
- val = exact.BinaryOp(mant, token.QUO, denom)
+ val = constant.BinaryOp(mant, token.QUO, denom)
return
}
if exp > 0 {
- mant = exact.Shift(mant, token.SHL, uint(exp))
+ mant = constant.Shift(mant, token.SHL, uint(exp))
}
typ = types.Typ[types.UntypedFloat]
val = mant
@@ -820,7 +857,7 @@ func (p *parser) parseConstDecl() {
p.expect('=')
var typ types.Type
- var val exact.Value
+ var val constant.Value
switch p.tok {
case scanner.Ident:
// bool_lit
@@ -828,7 +865,7 @@ func (p *parser) parseConstDecl() {
p.error("expected true or false")
}
typ = types.Typ[types.UntypedBool]
- val = exact.MakeBool(p.lit == "true")
+ val = constant.MakeBool(p.lit == "true")
p.next()
case '-', scanner.Int:
@@ -852,18 +889,18 @@ func (p *parser) parseConstDecl() {
p.expectKeyword("i")
p.expect(')')
typ = types.Typ[types.UntypedComplex]
- val = exact.BinaryOp(re, token.ADD, exact.MakeImag(im))
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
case scanner.Char:
// rune_lit
typ = types.Typ[types.UntypedRune]
- val = exact.MakeFromLiteral(p.lit, token.CHAR, 0)
+ val = constant.MakeFromLiteral(p.lit, token.CHAR, 0)
p.next()
case scanner.String:
// string_lit
typ = types.Typ[types.UntypedString]
- val = exact.MakeFromLiteral(p.lit, token.STRING, 0)
+ val = constant.MakeFromLiteral(p.lit, token.STRING, 0)
p.next()
default:
diff --git a/_tools/src/golang.org/x/tools/go/internal/gcimporter/iexport.go b/_tools/src/golang.org/x/tools/go/internal/gcimporter/iexport.go
new file mode 100644
index 0000000..be671c7
--- /dev/null
+++ b/_tools/src/golang.org/x/tools/go/internal/gcimporter/iexport.go
@@ -0,0 +1,723 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
+// see that file for specification of the format.
+
+// +build go1.11
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "go/ast"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "reflect"
+ "sort"
+)
+
+// Current indexed export format version. Increase with each format change.
+// 0: Go1.11 encoding
+const iexportVersion = 0
+
+// IExportData returns the binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+
+ p := iexporter{
+ out: bytes.NewBuffer(nil),
+ fset: fset,
+ allPkgs: map[*types.Package]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[types.Object]uint64{},
+ typIndex: map[types.Type]uint64{},
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
+ }
+
+ // Initialize work queue with exported declarations.
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if ast.IsExported(name) {
+ p.pushDecl(scope.Lookup(name))
+ }
+ }
+
+ // Loop until no more work.
+ for !p.declTodo.empty() {
+ p.doDecl(p.declTodo.popHead())
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex, pkg)
+ w.flush()
+
+ // Assemble header.
+ var hdr intWriter
+ hdr.WriteByte('i')
+ hdr.uint64(iexportVersion)
+ hdr.uint64(uint64(p.strings.Len()))
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ io.Copy(p.out, &hdr)
+ io.Copy(p.out, &p.strings)
+ io.Copy(p.out, &p.data0)
+
+ return p.out.Bytes(), nil
+}
+
+// writeIndex writes out an object index. mainIndex indicates whether
+// we're writing out the main index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) {
+ // Build a map from packages to objects from that package.
+ pkgObjs := map[*types.Package][]types.Object{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ pkgObjs[localpkg] = nil
+ for pkg := range w.p.allPkgs {
+ pkgObjs[pkg] = nil
+ }
+
+ for obj := range index {
+ pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
+ }
+
+ var pkgs []*types.Package
+ for pkg, objs := range pkgObjs {
+ pkgs = append(pkgs, pkg)
+
+ sort.Slice(objs, func(i, j int) bool {
+ return objs[i].Name() < objs[j].Name()
+ })
+ }
+
+ sort.Slice(pkgs, func(i, j int) bool {
+ return pkgs[i].Path() < pkgs[j].Path()
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(pkg.Path())
+ w.string(pkg.Name())
+ w.uint64(uint64(0)) // package height is not needed for go/types
+
+ objs := pkgObjs[pkg]
+ w.uint64(uint64(len(objs)))
+ for _, obj := range objs {
+ w.string(obj.Name())
+ w.uint64(index[obj])
+ }
+ }
+}
+
+type iexporter struct {
+ fset *token.FileSet
+ out *bytes.Buffer
+
+ // allPkgs tracks all packages that have been referenced by
+ // the export data, so we can ensure to include them in the
+ // main index.
+ allPkgs map[*types.Package]bool
+
+ declTodo objQueue
+
+ strings intWriter
+ stringIndex map[string]uint64
+
+ data0 intWriter
+ declIndex map[types.Object]uint64
+ typIndex map[types.Type]uint64
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ p.strings.uint64(uint64(len(s)))
+ p.strings.WriteString(s)
+ }
+ return off
+}
+
+// pushDecl adds n to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(obj types.Object) {
+ // Package unsafe is known to the compiler and predeclared.
+ assert(obj.Pkg() != types.Unsafe)
+
+ if _, ok := p.declIndex[obj]; ok {
+ return
+ }
+
+ p.declIndex[obj] = ^uint64(0) // mark n present in work queue
+ p.declTodo.pushTail(obj)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+ p *iexporter
+
+ data intWriter
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+}
+
+func (p *iexporter) doDecl(obj types.Object) {
+ w := p.newWriter()
+ w.setPkg(obj.Pkg(), false)
+
+ switch obj := obj.(type) {
+ case *types.Var:
+ w.tag('V')
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+
+ case *types.Func:
+ sig, _ := obj.Type().(*types.Signature)
+ if sig.Recv() != nil {
+ panic(internalErrorf("unexpected method: %v", sig))
+ }
+ w.tag('F')
+ w.pos(obj.Pos())
+ w.signature(sig)
+
+ case *types.Const:
+ w.tag('C')
+ w.pos(obj.Pos())
+ w.value(obj.Type(), obj.Val())
+
+ case *types.TypeName:
+ if obj.IsAlias() {
+ w.tag('A')
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+ break
+ }
+
+ // Defined type.
+ w.tag('T')
+ w.pos(obj.Pos())
+
+ underlying := obj.Type().Underlying()
+ w.typ(underlying, obj.Pkg())
+
+ t := obj.Type()
+ if types.IsInterface(t) {
+ break
+ }
+
+ named, ok := t.(*types.Named)
+ if !ok {
+ panic(internalErrorf("%s is not a defined type", t))
+ }
+
+ n := named.NumMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := named.Method(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+ w.param(sig.Recv())
+ w.signature(sig)
+ }
+
+ default:
+ panic(internalErrorf("unexpected object: %v", obj))
+ }
+
+ p.declIndex[obj] = w.flush()
+}
+
+func (w *exportWriter) tag(tag byte) {
+ w.data.WriteByte(tag)
+}
+
// pos encodes a source position as (file, line), delta-encoding the
// line against the previously written position when the file is
// unchanged.
func (w *exportWriter) pos(pos token.Pos) {
	p := w.p.fset.Position(pos)
	file := p.Filename
	line := int64(p.Line)

	// When file is the same as the last position (common case),
	// we can save a few bytes by delta encoding just the line
	// number.
	//
	// Note: Because data objects may be read out of order (or not
	// at all), we can only apply delta encoding within a single
	// object. This is handled implicitly by tracking prevFile and
	// prevLine as fields of exportWriter.

	if file == w.prevFile {
		delta := line - w.prevLine
		w.int64(delta)
		if delta == deltaNewFile {
			// The delta collided with the new-file marker; a -1
			// follows to disambiguate (the importer checks for it).
			w.int64(-1)
		}
	} else {
		w.int64(deltaNewFile)
		w.int64(line) // line >= 0
		w.string(file)
		w.prevFile = file
	}
	w.prevLine = line
}
+
+func (w *exportWriter) pkg(pkg *types.Package) {
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(pkg.Path())
+}
+
// qualifiedIdent writes a reference to a package-level object as
// (name, package) — the order the importer expects — and queues the
// object so its own declaration gets exported too.
func (w *exportWriter) qualifiedIdent(obj types.Object) {
	// Ensure any referenced declarations are written out too.
	w.p.pushDecl(obj)

	w.string(obj.Name())
	w.pkg(obj.Pkg())
}
+
+func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
+ w.data.uint64(w.p.typOff(t, pkg))
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+ off := uint64(w.p.data0.Len())
+ io.Copy(&w.p.data0, &w.data)
+ return off
+}
+
// typOff returns the data-section offset for type t, writing the
// type's descriptor on first use and caching the offset for later
// references.
func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
	off, ok := p.typIndex[t]
	if !ok {
		w := p.newWriter()
		w.doTyp(t, pkg)
		// Offsets below predeclReserved are reserved for predeclared
		// (universe) types, so real offsets are biased past them.
		off = predeclReserved + w.flush()
		p.typIndex[t] = off
	}
	return off
}
+
+func (w *exportWriter) startType(k itag) {
+ w.data.uint64(uint64(k))
+}
+
// doTyp writes the descriptor for type t: a kind tag followed by
// kind-specific data. Named types become references to their
// declarations; composite types recursively reference their component
// types. The field order in each case must match the importer's doType.
func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
	switch t := t.(type) {
	case *types.Named:
		w.startType(definedType)
		w.qualifiedIdent(t.Obj())

	case *types.Pointer:
		w.startType(pointerType)
		w.typ(t.Elem(), pkg)

	case *types.Slice:
		w.startType(sliceType)
		w.typ(t.Elem(), pkg)

	case *types.Array:
		w.startType(arrayType)
		w.uint64(uint64(t.Len()))
		w.typ(t.Elem(), pkg)

	case *types.Chan:
		w.startType(chanType)
		// 1 RecvOnly; 2 SendOnly; 3 SendRecv
		var dir uint64
		switch t.Dir() {
		case types.RecvOnly:
			dir = 1
		case types.SendOnly:
			dir = 2
		case types.SendRecv:
			dir = 3
		}
		w.uint64(dir)
		w.typ(t.Elem(), pkg)

	case *types.Map:
		w.startType(mapType)
		w.typ(t.Key(), pkg)
		w.typ(t.Elem(), pkg)

	case *types.Signature:
		w.startType(signatureType)
		// Parameter names are local identifiers, so the owning package
		// is written out for the importer's decoding context.
		w.setPkg(pkg, true)
		w.signature(t)

	case *types.Struct:
		w.startType(structType)
		w.setPkg(pkg, true)

		// Fields: count, then (position, name, type, embedded, tag).
		n := t.NumFields()
		w.uint64(uint64(n))
		for i := 0; i < n; i++ {
			f := t.Field(i)
			w.pos(f.Pos())
			w.string(f.Name())
			w.typ(f.Type(), pkg)
			w.bool(f.Embedded())
			w.string(t.Tag(i)) // note (or tag)
		}

	case *types.Interface:
		w.startType(interfaceType)
		w.setPkg(pkg, true)

		// Embedded types first, then explicit methods.
		n := t.NumEmbeddeds()
		w.uint64(uint64(n))
		for i := 0; i < n; i++ {
			f := t.Embedded(i)
			w.pos(f.Obj().Pos())
			w.typ(f.Obj().Type(), f.Obj().Pkg())
		}

		n = t.NumExplicitMethods()
		w.uint64(uint64(n))
		for i := 0; i < n; i++ {
			m := t.ExplicitMethod(i)
			w.pos(m.Pos())
			w.string(m.Name())
			sig, _ := m.Type().(*types.Signature)
			w.signature(sig)
		}

	default:
		panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
	}
}
+
+func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
+ if write {
+ w.pkg(pkg)
+ }
+
+ w.currPkg = pkg
+}
+
// signature writes the parameter list, result list, and — only when
// there is at least one parameter — the variadic flag. (A function
// with no parameters cannot be variadic, so the flag is omitted; the
// importer mirrors this exactly.)
func (w *exportWriter) signature(sig *types.Signature) {
	w.paramList(sig.Params())
	w.paramList(sig.Results())
	if sig.Params().Len() > 0 {
		w.bool(sig.Variadic())
	}
}
+
+func (w *exportWriter) paramList(tup *types.Tuple) {
+ n := tup.Len()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ w.param(tup.At(i))
+ }
+}
+
+func (w *exportWriter) param(obj types.Object) {
+ w.pos(obj.Pos())
+ w.localIdent(obj)
+ w.typ(obj.Type(), obj.Pkg())
+}
+
// value writes a constant's type followed by its kind-specific
// payload. The type comes first because the integer encoding depends
// on the type's size (see mpint).
func (w *exportWriter) value(typ types.Type, v constant.Value) {
	w.typ(typ, nil)

	switch v.Kind() {
	case constant.Bool:
		w.bool(constant.BoolVal(v))
	case constant.Int:
		var i big.Int
		if i64, exact := constant.Int64Val(v); exact {
			i.SetInt64(i64)
		} else if ui64, exact := constant.Uint64Val(v); exact {
			i.SetUint64(ui64)
		} else {
			// Doesn't fit in 64 bits; go through the exact decimal
			// string representation.
			i.SetString(v.ExactString(), 10)
		}
		w.mpint(&i, typ)
	case constant.Float:
		f := constantToFloat(v)
		w.mpfloat(f, typ)
	case constant.Complex:
		// Real part first, then imaginary part.
		w.mpfloat(constantToFloat(constant.Real(v)), typ)
		w.mpfloat(constantToFloat(constant.Imag(v)), typ)
	case constant.String:
		w.string(constant.StringVal(v))
	case constant.Unknown:
		// package contains type errors
	default:
		panic(internalErrorf("unexpected value %v (%T)", v, v))
	}
}
+
// constantToFloat converts a constant.Value with kind constant.Float to a
// big.Float.
func constantToFloat(x constant.Value) *big.Float {
	assert(x.Kind() == constant.Float)
	// Use the same floating-point precision (512) as cmd/compile
	// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
	const mpprec = 512
	var f big.Float
	f.SetPrec(mpprec)
	if v, exact := constant.Float64Val(x); exact {
		// float64
		f.SetFloat64(v)
	} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
		// TODO(gri): add big.Rat accessor to constant.Value.
		n := valueToRat(num)
		d := valueToRat(denom)
		f.SetRat(n.Quo(n, d))
	} else {
		// Value too large to represent as a fraction => inaccessible.
		// TODO(gri): add big.Float accessor to constant.Value.
		_, ok := f.SetString(x.ExactString())
		assert(ok)
	}
	return &f
}
+
// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
// byte. Larger values are written out as a length-prefixed big-endian
// byte string, where the length prefix is encoded as its complement.
// For example, bytes 0, 1, and 2 directly represent the integer
// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-,
// 2-, and 3-byte big-endian string follow.
//
// Encoding for signed types use the same general approach as for
// unsigned types, except small values use zig-zag encoding and the
// bottom bit of length prefix byte for large values is reserved as a
// sign bit.
//
// The exact boundary between small and large encodings varies
// according to the maximum number of bytes needed to encode a value
// of type typ. As a special case, 8-bit types are always encoded as a
// single byte.
//
// TODO(mdempsky): Is this level of complexity really worthwhile?
func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
	basic, ok := typ.Underlying().(*types.Basic)
	if !ok {
		panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
	}

	signed, maxBytes := intSize(basic)

	negative := x.Sign() < 0
	if !signed && negative {
		panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
	}

	// big.Int.Bytes() is big-endian with no leading zeros; the length
	// bound is what makes the complement-coded prefix unambiguous.
	b := x.Bytes()
	if len(b) > 0 && b[0] == 0 {
		panic(internalErrorf("leading zeros"))
	}
	if uint(len(b)) > maxBytes {
		panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
	}

	// maxSmall is the boundary: bytes below it are direct small values,
	// bytes at or above it are complement-coded length prefixes.
	maxSmall := 256 - maxBytes
	if signed {
		maxSmall = 256 - 2*maxBytes
	}
	if maxBytes == 1 {
		maxSmall = 256
	}

	// Check if x can use small value encoding.
	if len(b) <= 1 {
		var ux uint
		if len(b) == 1 {
			ux = uint(b[0])
		}
		if signed {
			// Zig-zag: non-negative n -> 2n, negative n -> 2|n|-1.
			ux <<= 1
			if negative {
				ux--
			}
		}
		if ux < maxSmall {
			w.data.WriteByte(byte(ux))
			return
		}
	}

	// Large value: complement-coded length prefix (low bit = sign for
	// signed types), then the big-endian bytes.
	n := 256 - uint(len(b))
	if signed {
		n = 256 - 2*uint(len(b))
		if negative {
			n |= 1
		}
	}
	if n < maxSmall || n >= 256 {
		panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
	}

	w.data.WriteByte(byte(n))
	w.data.Write(b)
}
+
// mpfloat exports a multi-precision floating point number.
//
// The number's value is decomposed into mantissa × 2**exponent, where
// mantissa is an integer. The value is written out as mantissa (as a
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
	if f.IsInf() {
		panic("infinite constant")
	}

	// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
	var mant big.Float
	exp := int64(f.MantExp(&mant))

	// Scale so that mant is an integer.
	prec := mant.MinPrec()
	mant.SetMantExp(&mant, int(prec))
	exp -= int64(prec)

	manti, acc := mant.Int(nil)
	if acc != big.Exact {
		panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
	}
	w.mpint(manti, typ)
	// A zero mantissa fully determines the value, so the exponent is
	// omitted; the importer relies on this.
	if manti.Sign() != 0 {
		w.int64(exp)
	}
}
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
// int64 and uint64 forward to the chunk's varint writer; string is
// written as an offset into the shared string section.
func (w *exportWriter) int64(x int64) { w.data.int64(x) }
func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+func (w *exportWriter) localIdent(obj types.Object) {
+ // Anonymous parameters.
+ if obj == nil {
+ w.string("")
+ return
+ }
+
+ name := obj.Name()
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+ w.string(name)
+}
+
// intWriter is a bytes.Buffer extended with varint helpers, matching
// the integer encoding used throughout the export data.
type intWriter struct {
	bytes.Buffer
}

// int64 appends x in signed (zig-zag) varint encoding.
func (w *intWriter) int64(x int64) {
	var tmp [binary.MaxVarintLen64]byte
	w.Write(tmp[:binary.PutVarint(tmp[:], x)])
}

// uint64 appends x in unsigned varint encoding.
func (w *intWriter) uint64(x uint64) {
	var tmp [binary.MaxVarintLen64]byte
	w.Write(tmp[:binary.PutUvarint(tmp[:], x)])
}
+
// assert panics if cond is false; used to check internal invariants.
func assert(cond bool) {
	if cond {
		return
	}
	panic("internal error: assertion failed")
}
+
+// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
+
+// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
+// a ready-to-use empty queue.
+type objQueue struct {
+ ring []types.Object
+ head, tail int
+}
+
+// empty returns true if q contains no Nodes.
+func (q *objQueue) empty() bool {
+ return q.head == q.tail
+}
+
+// pushTail appends n to the tail of the queue.
+func (q *objQueue) pushTail(obj types.Object) {
+ if len(q.ring) == 0 {
+ q.ring = make([]types.Object, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]types.Object, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = obj
+ q.tail++
+}
+
+// popHead pops a node from the head of the queue. It panics if q is empty.
+func (q *objQueue) popHead() types.Object {
+ if q.empty() {
+ panic("dequeue empty")
+ }
+ obj := q.ring[q.head%len(q.ring)]
+ q.head++
+ return obj
+}
diff --git a/_tools/src/golang.org/x/tools/go/internal/gcimporter/iimport.go b/_tools/src/golang.org/x/tools/go/internal/gcimporter/iimport.go
new file mode 100644
index 0000000..3cb7ae5
--- /dev/null
+++ b/_tools/src/golang.org/x/tools/go/internal/gcimporter/iimport.go
@@ -0,0 +1,606 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See cmd/compile/internal/gc/iexport.go for the export data format.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "sort"
+)
+
+type intReader struct {
+ *bytes.Reader
+ path string
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
// predeclReserved is the number of type offsets reserved for the
// predeclared (universe-scope) types; descriptors for other types are
// stored at offsets biased past this value.
const predeclReserved = 32

// itag distinguishes the kinds of type descriptors in the data
// section; values must match the exporter's.
type itag uint64

const (
	// Types
	definedType itag = iota
	pointerType
	sliceType
	arrayType
	chanType
	mapType
	signatureType
	structType
	interfaceType
)
+
// IImportData imports a package from the serialized package data
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
	const currentVersion = 0
	version := -1
	defer func() {
		// Decoding failures are reported via panic (see errorf);
		// convert them to an error return, distinguishing
		// newer-format data from plain corruption.
		if e := recover(); e != nil {
			if version > currentVersion {
				err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
			} else {
				err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
			}
		}
	}()

	r := &intReader{bytes.NewReader(data), path}

	version = int(r.uint64())
	switch version {
	case currentVersion:
	default:
		errorf("unknown iexport format version %d", version)
	}

	// Header: lengths of the string and declaration sections, which
	// immediately follow it.
	sLen := int64(r.uint64())
	dLen := int64(r.uint64())

	whence, _ := r.Seek(0, io.SeekCurrent)
	stringData := data[whence : whence+sLen]
	declData := data[whence+sLen : whence+sLen+dLen]
	r.Seek(sLen+dLen, io.SeekCurrent)

	p := iimporter{
		ipath: path,

		stringData: stringData,
		stringCache: make(map[uint64]string),
		pkgCache: make(map[uint64]*types.Package),

		declData: declData,
		pkgIndex: make(map[*types.Package]map[string]uint64),
		typCache: make(map[uint64]types.Type),

		fake: fakeFileSet{
			fset: fset,
			files: make(map[string]*token.File),
		},
	}

	// Seed the type cache with the predeclared (universe) types; their
	// offsets occupy the range below predeclReserved.
	for i, pt := range predeclared() {
		p.typCache[uint64(i)] = pt
	}

	// Read the package index: each entry is the package's path, name,
	// height (ignored), and a name -> declaration-offset table.
	pkgList := make([]*types.Package, r.uint64())
	for i := range pkgList {
		pkgPathOff := r.uint64()
		pkgPath := p.stringAt(pkgPathOff)
		pkgName := p.stringAt(r.uint64())
		_ = r.uint64() // package height; unused by go/types

		// The empty path denotes the package being imported.
		if pkgPath == "" {
			pkgPath = path
		}
		pkg := imports[pkgPath]
		if pkg == nil {
			pkg = types.NewPackage(pkgPath, pkgName)
			imports[pkgPath] = pkg
		} else if pkg.Name() != pkgName {
			errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
		}

		p.pkgCache[pkgPathOff] = pkg

		nameIndex := make(map[string]uint64)
		for nSyms := r.uint64(); nSyms > 0; nSyms-- {
			name := p.stringAt(r.uint64())
			nameIndex[name] = r.uint64()
		}

		p.pkgIndex[pkg] = nameIndex
		pkgList[i] = pkg
	}
	var localpkg *types.Package
	for _, pkg := range pkgList {
		if pkg.Path() == path {
			localpkg = pkg
		}
	}

	// Eagerly decode every top-level declaration of the local package,
	// in sorted name order for determinism.
	names := make([]string, 0, len(p.pkgIndex[localpkg]))
	for name := range p.pkgIndex[localpkg] {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		p.doDecl(localpkg, name)
	}

	// Interfaces must be completed only after all their methods and
	// embedded types have been materialized.
	for _, typ := range p.interfaceList {
		typ.Complete()
	}

	// record all referenced packages as imports
	list := append(([]*types.Package)(nil), pkgList[1:]...)
	sort.Sort(byPath(list))
	localpkg.SetImports(list)

	// package was imported completely and without errors
	localpkg.MarkComplete()

	consumed, _ := r.Seek(0, io.SeekCurrent)
	return int(consumed), localpkg, nil
}
+
// iimporter holds the state shared across one indexed import: the raw
// section data plus the caches and indexes used to decode declarations
// lazily.
type iimporter struct {
	ipath string // import path of the package being imported

	stringData []byte // raw string section
	stringCache map[uint64]string // memoized stringAt results
	pkgCache map[uint64]*types.Package // path offset -> package

	declData []byte // raw declaration section
	pkgIndex map[*types.Package]map[string]uint64 // package -> name -> decl offset
	typCache map[uint64]types.Type // type offset -> decoded type

	fake fakeFileSet // synthesizes token.Pos values for decoded positions
	interfaceList []*types.Interface // interfaces to Complete() once import finishes
}
+
+func (p *iimporter) doDecl(pkg *types.Package, name string) {
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ r.declReader.Reset(p.declData[off:])
+
+ r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ if s, ok := p.stringCache[off]; ok {
+ return s
+ }
+
+ slen, n := binary.Uvarint(p.stringData[off:])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ s := string(p.stringData[spos : spos+slen])
+ p.stringCache[off] = s
+ return s
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
// typAt returns the type encoded at off. Offsets below predeclReserved
// index the predeclared-types cache; larger offsets point into the
// declaration data after subtracting the bias. When base is non-nil,
// an interface result binds its method receivers to base, so such
// results are neither served from nor stored in the cache.
func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
	if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
		return t
	}

	if off < predeclReserved {
		errorf("predeclared type missing from cache: %v", off)
	}

	r := &importReader{p: p}
	r.declReader.Reset(p.declData[off-predeclReserved:])
	t := r.doType(base)

	if base == nil || !isInterface(t) {
		p.typCache[off] = t
	}
	return t
}
+
// importReader decodes a single declaration or type descriptor out of
// the declaration data.
type importReader struct {
	p *iimporter // shared import state
	declReader bytes.Reader // positioned at the record being decoded
	currPkg *types.Package // package context for local identifiers
	prevFile string // position-decoding state; mirrors the exporter (see pos)
	prevLine int64
}
+
// obj decodes the declaration record for name at the reader's current
// position and installs the resulting object into the current
// package's scope. A record is a tag byte, a position, then
// tag-specific data (mirroring the exporter's doDecl).
func (r *importReader) obj(name string) {
	tag := r.byte()
	pos := r.pos()

	switch tag {
	case 'A':
		// Type alias.
		typ := r.typ()

		r.declare(types.NewTypeName(pos, r.currPkg, name, typ))

	case 'C':
		// Constant: type and value.
		typ, val := r.value()

		r.declare(types.NewConst(pos, r.currPkg, name, typ, val))

	case 'F':
		// Function (methods are decoded with their receiver type).
		sig := r.signature(nil)

		r.declare(types.NewFunc(pos, r.currPkg, name, sig))

	case 'T':
		// Types can be recursive. We need to setup a stub
		// declaration before recursing.
		obj := types.NewTypeName(pos, r.currPkg, name, nil)
		named := types.NewNamed(obj, nil, nil)
		r.declare(obj)

		underlying := r.p.typAt(r.uint64(), named).Underlying()
		named.SetUnderlying(underlying)

		if !isInterface(underlying) {
			// Non-interface defined types carry an explicit method
			// set: (position, name, receiver, signature) per method.
			for n := r.uint64(); n > 0; n-- {
				mpos := r.pos()
				mname := r.ident()
				recv := r.param()
				msig := r.signature(recv)

				named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
			}
		}

	case 'V':
		// Package-level variable.
		typ := r.typ()

		r.declare(types.NewVar(pos, r.currPkg, name, typ))

	default:
		errorf("unexpected tag: %v", tag)
	}
}
+
+func (r *importReader) declare(obj types.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
// value decodes a constant: its type, then a payload determined by the
// type's constant kind (mirroring the exporter's value). Invalid
// types — from packages containing type errors — yield an unknown
// value.
func (r *importReader) value() (typ types.Type, val constant.Value) {
	typ = r.typ()

	switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
	case types.IsBoolean:
		val = constant.MakeBool(r.bool())

	case types.IsString:
		val = constant.MakeString(r.string())

	case types.IsInteger:
		val = r.mpint(b)

	case types.IsFloat:
		val = r.mpfloat(b)

	case types.IsComplex:
		// Real part first, then imaginary part.
		re := r.mpfloat(b)
		im := r.mpfloat(b)
		val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))

	default:
		if b.Kind() == types.Invalid {
			// Package contains type errors; surface an unknown value.
			val = constant.MakeUnknown()
			return
		}
		errorf("unexpected type %v", typ) // panics
		panic("unreachable")
	}

	return
}
+
+func intSize(b *types.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types.IsUntyped) != 0 {
+ return true, 64
+ }
+
+ switch b.Kind() {
+ case types.Float32, types.Complex64:
+ return true, 3
+ case types.Float64, types.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types.IsUnsigned) == 0
+ switch b.Kind() {
+ case types.Int8, types.Uint8:
+ maxBytes = 1
+ case types.Int16, types.Uint16:
+ maxBytes = 2
+ case types.Int32, types.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(b *types.Basic) constant.Value {
+ signed, maxBytes := intSize(b)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ return constant.MakeInt64(v)
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+
+ buf := make([]byte, v)
+ io.ReadFull(&r.declReader, buf)
+
+ // convert to little endian
+ // TODO(gri) go/constant should have a more direct conversion function
+ // (e.g., once it supports a big.Float based implementation)
+ for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
+ buf[i], buf[j] = buf[j], buf[i]
+ }
+
+ x := constant.MakeFromBytes(buf)
+ if signed && n&1 != 0 {
+ x = constant.UnaryOp(token.SUB, x, 0)
+ }
+ return x
+}
+
+func (r *importReader) mpfloat(b *types.Basic) constant.Value {
+ x := r.mpint(b)
+ if constant.Sign(x) == 0 {
+ return x
+ }
+
+ exp := r.int64()
+ switch {
+ case exp > 0:
+ x = constant.Shift(x, token.SHL, uint(exp))
+ case exp < 0:
+ d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
+ x = constant.BinaryOp(x, token.QUO, d)
+ }
+ return x
+}
+
// ident reads a (possibly empty) local identifier.
func (r *importReader) ident() string {
	return r.string()
}

// qualifiedIdent reads a reference to a package-level object. The name
// is read before the package, mirroring the order the exporter writes
// them.
func (r *importReader) qualifiedIdent() (*types.Package, string) {
	name := r.string()
	pkg := r.pkg()
	return pkg, name
}
+
// pos decodes a source position, mirroring the exporter's delta
// encoding: a delta equal to deltaNewFile introduces either a new
// (line, filename) pair, or — when followed by -1 — a literal line
// delta that happened to collide with the marker value.
func (r *importReader) pos() token.Pos {
	delta := r.int64()
	if delta != deltaNewFile {
		r.prevLine += delta
	} else if l := r.int64(); l == -1 {
		// Collision marker: the delta really was deltaNewFile.
		r.prevLine += deltaNewFile
	} else {
		r.prevFile = r.string()
		r.prevLine = l
	}

	if r.prevFile == "" && r.prevLine == 0 {
		return token.NoPos
	}

	return r.p.fake.pos(r.prevFile, int(r.prevLine))
}
+
+func (r *importReader) typ() types.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types.Type) bool {
+ _, ok := t.(*types.Interface)
+ return ok
+}
+
// pkg and string resolve offset-encoded references through the
// importer's shared package and string tables.
func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+
// doType decodes one type descriptor: a kind tag followed by
// kind-specific data (mirroring the exporter's doTyp). base, when
// non-nil, is the defined type whose underlying type is being decoded;
// it becomes the receiver type of interface methods.
func (r *importReader) doType(base *types.Named) types.Type {
	switch k := r.kind(); k {
	default:
		errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
		return nil

	case definedType:
		// Reference to a named type: force its declaration, then look
		// it up in its package's scope.
		pkg, name := r.qualifiedIdent()
		r.p.doDecl(pkg, name)
		return pkg.Scope().Lookup(name).(*types.TypeName).Type()
	case pointerType:
		return types.NewPointer(r.typ())
	case sliceType:
		return types.NewSlice(r.typ())
	case arrayType:
		n := r.uint64()
		return types.NewArray(r.typ(), int64(n))
	case chanType:
		dir := chanDir(int(r.uint64()))
		return types.NewChan(dir, r.typ())
	case mapType:
		return types.NewMap(r.typ(), r.typ())
	case signatureType:
		r.currPkg = r.pkg()
		return r.signature(nil)

	case structType:
		r.currPkg = r.pkg()

		// Fields: (position, name, type, embedded, tag) per field.
		fields := make([]*types.Var, r.uint64())
		tags := make([]string, len(fields))
		for i := range fields {
			fpos := r.pos()
			fname := r.ident()
			ftyp := r.typ()
			emb := r.bool()
			tag := r.string()

			fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
			tags[i] = tag
		}
		return types.NewStruct(fields, tags)

	case interfaceType:
		r.currPkg = r.pkg()

		// Embedded types first, then explicit methods.
		embeddeds := make([]types.Type, r.uint64())
		for i := range embeddeds {
			_ = r.pos()
			embeddeds[i] = r.typ()
		}

		methods := make([]*types.Func, r.uint64())
		for i := range methods {
			mpos := r.pos()
			mname := r.ident()

			// TODO(mdempsky): Matches bimport.go, but I
			// don't agree with this.
			var recv *types.Var
			if base != nil {
				recv = types.NewVar(token.NoPos, r.currPkg, "", base)
			}

			msig := r.signature(recv)
			methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
		}

		// Completion is deferred until the whole package is imported;
		// see IImportData.
		typ := newInterface(methods, embeddeds)
		r.p.interfaceList = append(r.p.interfaceList, typ)
		return typ
	}
}
+
// kind reads the next type-descriptor tag from the declaration data.
func (r *importReader) kind() itag {
	return itag(r.uint64())
}
+
// signature reads a function signature for the given receiver (nil
// for ordinary functions). The variadic flag is present only when
// there is at least one parameter, matching the exporter.
func (r *importReader) signature(recv *types.Var) *types.Signature {
	params := r.paramList()
	results := r.paramList()
	variadic := params.Len() > 0 && r.bool()
	return types.NewSignature(recv, params, results, variadic)
}
+
+func (r *importReader) paramList() *types.Tuple {
+ xs := make([]*types.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
diff --git a/_tools/src/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/_tools/src/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
new file mode 100644
index 0000000..463f252
--- /dev/null
+++ b/_tools/src/golang.org/x/tools/go/internal/gcimporter/newInterface10.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+ named := make([]*types.Named, len(embeddeds))
+ for i, e := range embeddeds {
+ var ok bool
+ named[i], ok = e.(*types.Named)
+ if !ok {
+ panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
+ }
+ }
+ return types.NewInterface(methods, named)
+}
diff --git a/_tools/src/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/_tools/src/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
new file mode 100644
index 0000000..ab28b95
--- /dev/null
+++ b/_tools/src/golang.org/x/tools/go/internal/gcimporter/newInterface11.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.11
+
+package gcimporter
+
+import "go/types"
+
// newInterface builds an interface from explicit methods and embedded
// types using the Go 1.11+ API, which permits embedding arbitrary
// types.
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
	iface := types.NewInterfaceType(methods, embeddeds)
	return iface
}
diff --git a/_tools/src/golang.org/x/tools/go/loader/loader.go b/_tools/src/golang.org/x/tools/go/loader/loader.go
index de756f7..de34b80 100644
--- a/_tools/src/golang.org/x/tools/go/loader/loader.go
+++ b/_tools/src/golang.org/x/tools/go/loader/loader.go
@@ -22,6 +22,7 @@ import (
"time"
"golang.org/x/tools/go/ast/astutil"
+ "golang.org/x/tools/go/internal/cgo"
)
var ignoreVendor build.ImportMode
@@ -754,7 +755,7 @@ func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.Fil
// Preprocess CgoFiles and parse the outputs (sequentially).
if which == 'g' && bp.CgoFiles != nil {
- cgofiles, err := processCgoFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode)
+ cgofiles, err := cgo.ProcessFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode)
if err != nil {
errs = append(errs, err)
} else {
@@ -779,7 +780,7 @@ func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, err
if to == "C" {
// This should be unreachable, but ad hoc packages are
// not currently subject to cgo preprocessing.
- // See https://github.com/golang/go/issues/11627.
+ // See https://golang.org/issue/11627.
return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`,
from.Pkg.Path())
}
diff --git a/tools.json b/tools.json
index ab90cf0..35eb330 100644
--- a/tools.json
+++ b/tools.json
@@ -1,8 +1,8 @@
{
"Tools": [
{
- "Repository": "github.com/golang/lint/golint",
- "Commit": "cb00e5669539f047b2f4c53a421a01b0c8e172c6"
+ "Repository": "golang.org/x/lint/golint",
+ "Commit": "959b441ac422379a43da2230f62be024250818b0"
},
{
"Repository": "github.com/tsenart/deadcode",
@@ -35,7 +35,11 @@
{
"Repository": "github.com/mitchellh/gox",
"Commit": "c9740af9c6574448fd48eb30a71f964014c7a837"
+ },
+ {
+ "Repository": "github.com/jteeuwen/go-bindata/go-bindata",
+ "Commit": "6025e8de665b31fa74ab1a66f2cddd8c0abf887e"
}
],
- "RetoolVersion": "1.3.5"
+ "RetoolVersion": "1.3.7"
}
\ No newline at end of file