Mirror of https://github.com/golang/go.git (synced 2025-12-08 06:10:04 +00:00)

[dev.ssa] Merge remote-tracking branch 'origin/master' into mergebranch

Semi-regular merge from tip to dev.ssa.

Conflicts:
    src/runtime/sys_windows_amd64.s

Change-Id: I5f733130049c810e6ceacd46dad85faebca52b29

This commit is contained in commit 23d5810c8f.

218 changed files with 4017 additions and 811 deletions

AUTHORS | 5

@@ -70,6 +70,7 @@ Andrew Williams <williams.andrew@gmail.com>
 Andrey Mirtchovski <mirtchovski@gmail.com>
 Andrey Petrov <andrey.petrov@shazow.net>
 Andriy Lytvynov <lytvynov.a.v@gmail.com>
+Andy Balholm <andy@balholm.com>
 Andy Davis <andy@bigandian.com>
 Andy Maloney <asmaloney@gmail.com>
 Anfernee Yongkun Gui <anfernee.gui@gmail.com>
@@ -142,6 +143,7 @@ Christopher Guiney <chris@guiney.net>
 Christopher Nielsen <m4dh4tt3r@gmail.com>
 Christopher Redden <christopher.redden@gmail.com>
 Christopher Wedgwood <cw@f00f.org>
+CL Sung <clsung@gmail.com> <cl_sung@htc.com>
 Clement Skau <clementskau@gmail.com>
 CloudFlare Inc.
 Colin Kennedy <moshen.colin@gmail.com>
@@ -187,6 +189,7 @@ Devon H. O'Dell <devon.odell@gmail.com>
 Dhiru Kholia <dhiru.kholia@gmail.com>
 Didier Spezia <didier.06@gmail.com>
 Dimitri Tcaciuc <dtcaciuc@gmail.com>
+Dirk Gadsden <dirk@esherido.com>
 Dmitri Shuralyov <shurcooL@gmail.com>
 Dmitriy Shelenin <deemok@googlemail.com> <deemok@gmail.com>
 Dmitry Chestnykh <dchest@gmail.com>
@@ -526,6 +529,7 @@ Pieter Droogendijk <pieter@binky.org.uk>
 Pietro Gagliardi <pietro10@mac.com>
 Preetam Jinka <pj@preet.am>
 Quan Yong Zhai <qyzhai@gmail.com>
+Quentin Perez <qperez@ocs.online.net>
 Quoc-Viet Nguyen <afelion@gmail.com>
 RackTop Systems Inc.
 Raif S. Naffah <go@naffah-raif.name>
@@ -644,6 +648,7 @@ William Josephson <wjosephson@gmail.com>
 William Orr <will@worrbase.com> <ay1244@gmail.com>
 Xia Bin <snyh@snyh.org>
 Xing Xing <mikespook@gmail.com>
+Yahoo Inc.
 Yann Kerhervé <yann.kerherve@gmail.com>
 Yao Zhang <lunaria21@gmail.com>
 Yasuharu Goto <matope.ono@gmail.com>

@@ -103,6 +103,7 @@ Andrew Williams <williams.andrew@gmail.com>
 Andrey Mirtchovski <mirtchovski@gmail.com>
 Andrey Petrov <andrey.petrov@shazow.net>
 Andriy Lytvynov <lytvynov.a.v@gmail.com>
+Andy Balholm <andy@balholm.com>
 Andy Davis <andy@bigandian.com>
 Andy Maloney <asmaloney@gmail.com>
 Anfernee Yongkun Gui <anfernee.gui@gmail.com>
@@ -203,8 +204,10 @@ Christopher Nielsen <m4dh4tt3r@gmail.com>
 Christopher Redden <christopher.redden@gmail.com>
 Christopher Swenson <cswenson@google.com>
 Christopher Wedgwood <cw@f00f.org>
+CL Sung <clsung@gmail.com> <cl_sung@htc.com>
 Clement Skau <clementskau@gmail.com>
 Colby Ranger <cranger@google.com>
+Colin Cross <ccross@android.com>
 Colin Kennedy <moshen.colin@gmail.com>
 Conrad Meyer <cemeyer@cs.washington.edu>
 Corey Thomasson <cthom.lists@gmail.com>
@@ -260,11 +263,13 @@ Dean Prichard <dean.prichard@gmail.com>
 Denis Bernard <db047h@gmail.com>
 Denis Brandolini <denis.brandolini@gmail.com>
 Derek Buitenhuis <derek.buitenhuis@gmail.com>
+Derek Che <drc@yahoo-inc.com>
 Derek Parker <parkerderek86@gmail.com>
 Devon H. O'Dell <devon.odell@gmail.com>
 Dhiru Kholia <dhiru.kholia@gmail.com>
 Didier Spezia <didier.06@gmail.com>
 Dimitri Tcaciuc <dtcaciuc@gmail.com>
+Dirk Gadsden <dirk@esherido.com>
 Dmitri Shuralyov <shurcooL@gmail.com>
 Dmitriy Shelenin <deemok@googlemail.com> <deemok@gmail.com>
 Dmitriy Vyukov <dvyukov@google.com>
@@ -702,6 +707,7 @@ Pieter Droogendijk <pieter@binky.org.uk>
 Pietro Gagliardi <pietro10@mac.com>
 Preetam Jinka <pj@preet.am>
 Quan Yong Zhai <qyzhai@gmail.com>
+Quentin Perez <qperez@ocs.online.net>
 Quoc-Viet Nguyen <afelion@gmail.com>
 Rahul Chaudhry <rahulchaudhry@chromium.org>
 Raif S. Naffah <go@naffah-raif.name>

@@ -178,13 +178,18 @@ pkg encoding/asn1, const TagUTCTime = 23
 pkg encoding/asn1, const TagUTCTime ideal-int
 pkg encoding/asn1, const TagUTF8String = 12
 pkg encoding/asn1, const TagUTF8String ideal-int
-pkg go/build, const AllowVendor = 8
-pkg go/build, const AllowVendor ImportMode
+pkg go/build, const IgnoreVendor = 8
+pkg go/build, const IgnoreVendor ImportMode
 pkg go/build, type Package struct, InvalidGoFiles []string
 pkg go/constant, func ToComplex(Value) Value
 pkg go/constant, func ToFloat(Value) Value
 pkg go/constant, func ToInt(Value) Value
 pkg go/constant, type Value interface, ExactString() string
+pkg go/types, method (*Package) SetName(string)
+pkg go/types, type ImportMode int
+pkg go/types, type ImporterFrom interface { Import, ImportFrom }
+pkg go/types, type ImporterFrom interface, Import(string) (*Package, error)
+pkg go/types, type ImporterFrom interface, ImportFrom(string, string, ImportMode) (*Package, error)
 pkg html/template, func IsTrue(interface{}) (bool, bool)
 pkg html/template, method (*Template) DefinedTemplates() string
 pkg image, func NewNYCbCrA(Rectangle, YCbCrSubsampleRatio) *NYCbCrA
@@ -241,6 +246,8 @@ pkg net/http, const StatusRequestHeaderFieldsTooLarge = 431
 pkg net/http, const StatusRequestHeaderFieldsTooLarge ideal-int
 pkg net/http, const StatusTooManyRequests = 429
 pkg net/http, const StatusTooManyRequests ideal-int
+pkg net/http, const StatusUnavailableForLegalReasons = 451
+pkg net/http, const StatusUnavailableForLegalReasons ideal-int
 pkg net/http, type Transport struct, ExpectContinueTimeout time.Duration
 pkg net/http, type Transport struct, TLSNextProto map[string]func(string, *tls.Conn) RoundTripper
 pkg net/http, var ErrSkipAltProtocol error
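
The go/build rename above (AllowVendor becoming IgnoreVendor) is easiest to see in use. The following is a minimal sketch, not part of the commit, exercising the renamed ImportMode value and the InvalidGoFiles field listed above:

    package main

    import (
        "fmt"
        "go/build"
        "log"
    )

    func main() {
        // IgnoreVendor tells Import to resolve the path without consulting
        // vendor directories; InvalidGoFiles is the new Package field.
        pkg, err := build.Import("fmt", "", build.IgnoreVendor)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(pkg.Dir, pkg.InvalidGoFiles)
    }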

@@ -510,6 +510,13 @@ the stack pointer may change during any function call:
 even pointers to stack data must not be kept in local variables.
 </p>
+
+<p>
+Assembly functions should always be given Go prototypes,
+both to provide pointer information for the arguments and results
+and to let <code>go</code> <code>vet</code> check that
+the offsets being used to access them are correct.
+</p>
 
 <h2 id="architectures">Architecture-specific details</h2>
 
 <p>
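
To illustrate the prototype rule added above (a sketch, not from this commit; the function and file names are hypothetical): a Go file declares the signature, a separate assembly file provides the body, and go vet can then check the frame offsets against the prototype.

    // add.go
    package add

    // Add is implemented in add_amd64.s; the Go prototype supplies the
    // argument/result layout that go vet checks the assembly against.
    func Add(a, b int64) int64

    // add_amd64.s
    // func Add(a, b int64) int64
    TEXT ·Add(SB), $0-24
        MOVQ a+0(FP), AX
        ADDQ b+8(FP), AX
        MOVQ AX, ret+16(FP)
        RET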

@@ -24,21 +24,31 @@ A similar explanation is available as a
 
 <h2 id="Organization">Code organization</h2>
 
+<h3 id="Overview">Overview</h3>
+
+<ul>
+<li>Go programmers typically keep all their Go code in a single <i>workspace</i>.</li>
+<li>A workspace contains many version control <i>repositories</i>
+(managed by Git, for example).</li>
+<li>Each repository contains one or more <i>packages</i>.</li>
+<li>Each package consists of one or more Go source files in a single directory.</li>
+<li>The path to a package's directory determines its <i>import path</i>.</li>
+</ul>
+
+<p>
+Note that this differs from other programming environments in which every
+project has a separate workspace and workspaces are closely tied to version
+control repositories.
+</p>
+
 <h3 id="Workspaces">Workspaces</h3>
 
 <p>
-The <code>go</code> tool is designed to work with open source code maintained
-in public repositories. Although you don't need to publish your code, the model
-for how the environment is set up works the same whether you do or not.
-</p>
-
-<p>
-Go code must be kept inside a <i>workspace</i>.
 A workspace is a directory hierarchy with three directories at its root:
 </p>
 
 <ul>
-<li><code>src</code> contains Go source files organized into packages (one package per directory),
+<li><code>src</code> contains Go source files,
 <li><code>pkg</code> contains package objects, and
 <li><code>bin</code> contains executable commands.
 </ul>
@@ -77,16 +87,25 @@ src/
         stringutil/
             reverse.go       # package source
             reverse_test.go  # test source
+    <a href="https://golang.org/x/image/">golang.org/x/image/</a>
+        .git/                # Git repository metadata
+        bmp/
+            reader.go        # package source
+            writer.go        # package source
+    ... (many more repositories and packages omitted) ...
 </pre>
 
 <p>
-This workspace contains one repository (<code>example</code>)
-comprising two commands (<code>hello</code> and <code>outyet</code>)
-and one library (<code>stringutil</code>).
+The tree above shows a workspace containing two repositories
+(<code>example</code> and <code>image</code>).
+The <code>example</code> repository contains two commands (<code>hello</code>
+and <code>outyet</code>) and one library (<code>stringutil</code>).
+The <code>image</code> repository contains the <code>bmp</code> package
+and <a href="https://godoc.org/golang.org/x/image">several others</a>.
 </p>
 
 <p>
-A typical workspace would contain many source repositories containing many
+A typical workspace contains many source repositories containing many
 packages and commands. Most Go programmers keep <i>all</i> their Go source code
 and dependencies in a single workspace.
 </p>
@@ -133,10 +152,16 @@ please see
 <a href="/cmd/go/#hdr-GOPATH_environment_variable"><code>go help gopath</code></a>
 </p>
 
-<h3 id="PackagePaths">Package paths</h3>
+<h3 id="ImportPaths">Import paths</h3>
 
 <p>
-The packages from the standard library are given short paths such as
+An <i>import path</i> is a string that uniquely identifies a package.
+A package's import path corresponds to its location inside a workspace
+or in a remote repository (explained below).
+</p>
+
+<p>
+The packages from the standard library are given short import paths such as
 <code>"fmt"</code> and <code>"net/http"</code>.
 For your own packages, you must choose a base path that is unlikely to
 collide with future additions to the standard library or other external
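
To make the workspace layout concrete, here is a minimal sketch (not part of the commit; the repository path is hypothetical) of the stringutil library referred to in the tree above, as it would sit inside a workspace:

    // Hypothetical location: $GOPATH/src/github.com/user/stringutil/reverse.go
    // Other packages import it by that directory's import path:
    //
    //     import "github.com/user/stringutil"
    package stringutil

    // Reverse returns its argument string reversed rune-wise left to right.
    func Reverse(s string) string {
        r := []rune(s)
        for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
            r[i], r[j] = r[j], r[i]
        }
        return string(r)
    }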

@@ -55,6 +55,13 @@ See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.5.2">Go
 1.5.2 milestone</a> on our issue tracker for details.
 </p>
+
+<p>
+go1.5.3 (released 2016/01/13) includes a security fix to the <code>math/big</code> package
+affecting the <code>crypto/tls</code> package.
+See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.5.3">Go 1.5.3 milestone on our issue tracker</a>
+and the <a href="https://golang.org/s/go153announce">release announcement</a> for details.
+</p>
 
 <h2 id="go1.4">go1.4 (released 2014/12/10)</h2>
 
 <p>

@@ -471,6 +471,16 @@ and <code>"page"</code> blocks to reuse the formatting for another page.
 
 <ul>
 
+<li>
+The <a href="/pkg/archive/tar/"><code>archive/tar</code></a> package's
+implementation corrects many bugs in rare corner cases of the file format.
+One visible change is that the
+<a href="/pkg/archive/tar/#Reader"><code>Reader</code></a> type's
+<a href="/pkg/archive/tar/#Reader.Read"><code>Read</code></a> method
+now presents the content of special file types as being empty,
+returning <code>io.EOF</code> immediately.
+</li>
+
 <li>
 In the <a href="/pkg/archive/zip/"><code>archive/zip</code></a> package, the
 <a href="/pkg/archive/zip/#Reader"><code>Reader</code></a> type now has a
@@ -514,6 +524,12 @@ now report
 <a href="/pkg/io/#EOF"><code>io.EOF</code></a>.
 </li>
 
+<li>
+The <a href="/pkg/crypto/cipher/"><code>crypto/cipher</code></a> package now
+overwrites the destination buffer in the event of a GCM decryption failure.
+This is to allow the AESNI code to avoid using a temporary buffer.
+</li>
+
 <li>
 The <a href="/pkg/crypto/tls/"><code>crypto/tls</code></a> package
 has a variety of minor changes.
@@ -568,6 +584,17 @@ Also in the <a href="/pkg/encoding/asn1/"><code>encoding/asn1</code></a> package
 <a href="/pkg/encoding/asn1/#Unmarshal"><code>Unmarshal</code></a> now rejects various non-standard integer and length encodings.
 </li>
 
+<li>
+The <a href="/pkg/encoding/base64"><code>encoding/base64</code></a> package's
+<a href="/pkg/encoding/base64/#Decoder"><code>Decoder</code></a> has been fixed
+to process the final bytes of its input. Previously it processed as many four-byte tokens as
+possible but ignored the remainder, up to three bytes.
+The <code>Decoder</code> therefore now handles inputs in unpadded encodings (like
+<a href="/pkg/encoding/base64/#RawURLEncoding">RawURLEncoding</a>) correctly,
+but it also rejects inputs in padded encodings that are truncated or end with invalid bytes,
+such as trailing spaces.
+</li>
+
 <li>
 The <a href="/pkg/encoding/json/"><code>encoding/json</code></a> package
 now checks the syntax of a
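
A quick sketch of the encoding/base64 behavior described in the note above (not part of the commit): unpadded input now decodes fully, while trailing junk after a padded encoding is rejected.

    package main

    import (
        "encoding/base64"
        "fmt"
    )

    func main() {
        // Unpadded RawURLEncoding input: the final, short group is decoded.
        b, err := base64.RawURLEncoding.DecodeString("aGVsbG8")
        fmt.Println(string(b), err) // hello <nil>

        // Padded input with a trailing space: rejected rather than ignored.
        _, err = base64.StdEncoding.DecodeString("aGVsbG8= ")
        fmt.Println(err) // non-nil (corrupt input) per the note above
    }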

@@ -1,46 +1,62 @@
 <!--{
-"Title": "Getting Help",
+"Title": "Help",
 "Path": "/help/"
 }-->
 
-<img class="gopher" src="/doc/gopher/help.png"/>
-
-<p>
-Need help with Go? Try these resources.
-</p>
-
 <div id="manual-nav"></div>
 
-<h3 id="faq"><a href="/doc/faq">Frequently Asked Questions (FAQ)</a></h3>
-<p>Answers to common questions about Go.</p>
+<h2 id="help">Get help</h2>
 
-<h3 id="playground"><a href="/play">The Go Playground</a></h3>
-<p>A place to write, run, and share Go code.</p>
+<img class="gopher" src="/doc/gopher/help.png"/>
 
-<h3 id="wiki"><a href="/wiki">The Go Wiki</a></h3>
-<p>A wiki maintained by the Go community.</p>
-
-<h3 id="mailinglist"><a href="//groups.google.com/group/golang-nuts">Go Nuts Mailing List</a></h3>
+<h3 id="mailinglist"><a href="https://groups.google.com/group/golang-nuts">Go Nuts Mailing List</a></h3>
 <p>
-Search the <a href="//groups.google.com/group/golang-nuts">golang-nuts</a>
+Search the <a href="https://groups.google.com/group/golang-nuts">golang-nuts</a>
 archives and consult the <a href="/doc/go_faq.html">FAQ</a> and
 <a href="//golang.org/wiki">wiki</a> before posting.
 </p>
 
+<h3 id="forum"><a href="https://forum.golangbridge.org/">Go Forum</a></h3>
+<p>
+The <a href="https://forum.golangbridge.org/">Go Forum</a> is an alternate discussion
+forum for Go programmers.
+</p>
+
+<h3 id="slack"><a href="https://blog.gopheracademy.com/gophers-slack-community/">Gopher Slack</a></h3>
+<p>Get live support from the official Go slack channel.</p>
+
 <h3 id="irc"><a href="irc:irc.freenode.net/go-nuts">Go IRC Channel</a></h3>
 <p>Get live support at <b>#go-nuts</b> on <b>irc.freenode.net</b>, the official
 Go IRC channel.</p>
 
-<h3 id="pluscom"><a href="https://plus.google.com/communities/114112804251407510571">The Go+ community</a></h3>
-<p>The Google+ community for Go enthusiasts.</p>
+<h3 id="faq"><a href="/doc/faq">Frequently Asked Questions (FAQ)</a></h3>
+<p>Answers to common questions about Go.</p>
 
-<h3 id="plus"><a href="https://plus.google.com/101406623878176903605/posts">The Go Programming Language at Google+</a></h3>
-<p>The Go project's Google+ page.</p>
+<h2 id="inform">Stay informed</h2>
 
-<h3 id="twitter"><a href="//twitter.com/golang">@golang at Twitter</a></h3>
+<h3 id="announce"><a href="https://groups.google.com/group/golang-announce">Go Announcements Mailing List</a></h3>
+<p>
+Subscribe to
+<a href="https://groups.google.com/group/golang-announce">golang-announce</a>
+for important announcements, such as the availability of new Go releases.
+</p>
+
+<h3 id="blog"><a href="//blog.golang.org">Go Blog</a></h3>
+<p>The Go project's official blog.</p>
+
+<h3 id="twitter"><a href="https://twitter.com/golang">@golang at Twitter</a></h3>
 <p>The Go project's official Twitter account.</p>
-<p>Tweeting about your problem with the <code>#golang</code> hashtag usually
-generates some helpful responses.</p>
+
+<h3 id="pluscom"><a href="https://plus.google.com/communities/114112804251407510571">Go+ community</a></h3>
+<p>A Google+ community for Go enthusiasts.</p>
+
+<h3 id="reddit"><a href="https://reddit.com/r/golang">golang sub-Reddit</a></h3>
+<p>
+The <a href="https://reddit.com/r/golang">golang sub-Reddit</a> is a place
+for Go news and discussion.
+</p>
+
+<h2 id="community">Community resources</h2>
 
 <h3 id="go_user_groups"><a href="/wiki/GoUserGroups">Go User Groups</a></h3>
 <p>
@@ -48,6 +64,12 @@ Each month in places around the world, groups of Go programmers ("gophers")
 meet to talk about Go. Find a chapter near you.
 </p>
 
+<h3 id="playground"><a href="/play">Go Playground</a></h3>
+<p>A place to write, run, and share Go code.</p>
+
+<h3 id="wiki"><a href="/wiki">Go Wiki</a></h3>
+<p>A wiki maintained by the Go community.</p>
+
 <h3 id="conduct"><a href="/conduct">Code of Conduct</a></h3>
 <p>
 Guidelines for participating in Go community spaces

@@ -106,6 +106,16 @@ var ptrTests = []ptrTest{
        body:    `i := 0; p := &S{p:&i, s:[]unsafe.Pointer{nil}}; C.f(&p.s[0])`,
        fail:    false,
    },
+   {
+       // Passing the address of a slice of an array that is
+       // an element in a struct, with a type conversion.
+       name:    "slice-ok-3",
+       c:       `void f(void* p) {}`,
+       imports: []string{"unsafe"},
+       support: `type S struct { p *int; a [4]byte }`,
+       body:    `i := 0; p := &S{p:&i}; s := p.a[:]; C.f(unsafe.Pointer(&s[0]))`,
+       fail:    false,
+   },
    {
        // Passing the address of a static variable with no
        // pointers doesn't matter.

@@ -110,13 +110,13 @@ func test7978(t *testing.T) {
    go issue7978go()
    // test in c code, before callback
    issue7978wait(0, 1)
-   issue7978check(t, "runtime.cgocall(", "", 1)
+   issue7978check(t, "_Cfunc_issue7978c(", "", 1)
    // test in go code, during callback
    issue7978wait(2, 3)
    issue7978check(t, "test.issue7978cb(", "test.issue7978go", 3)
    // test in c code, after callback
    issue7978wait(4, 5)
-   issue7978check(t, "runtime.cgocall(", "runtime.cgocallback", 1)
+   issue7978check(t, "_Cfunc_issue7978c(", "_cgoexpwrap", 1)
    // test in go code, after return from cgo
    issue7978wait(6, 7)
    issue7978check(t, "test.issue7978go(", "", 3)

@@ -2,15 +2,44 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+#include <signal.h>
 #include <stdint.h>
 #include <stdio.h>
+#include <string.h>
 
 #include "p.h"
 #include "libgo.h"
 
+static void (*oldHandler)(int, siginfo_t*, void*);
+
+static void handler(int signo, siginfo_t* info, void* ctxt) {
+   if (oldHandler) {
+       oldHandler(signo, info, ctxt);
+   }
+}
+
 int main(void) {
+   struct sigaction sa;
+   struct sigaction osa;
    int32_t res;
 
+   // Install our own signal handler.
+   memset(&sa, 0, sizeof sa);
+   sa.sa_sigaction = handler;
+   sigemptyset(&sa.sa_mask);
+   sa.sa_flags = SA_ONSTACK | SA_SIGINFO;
+   memset(&osa, 0, sizeof osa);
+   sigemptyset(&osa.sa_mask);
+   if (sigaction(SIGSEGV, &sa, &osa) < 0) {
+       perror("sigaction");
+       return 2;
+   }
+   if (osa.sa_handler == SIG_DFL || (osa.sa_flags&SA_ONSTACK) == 0) {
+       fprintf(stderr, "Go runtime did not install signal handler\n");
+       return 2;
+   }
+   oldHandler = osa.sa_sigaction;
+
    if (!DidInitRun()) {
        fprintf(stderr, "ERROR: buildmode=c-archive init should run\n");
        return 2;
@@ -21,6 +50,16 @@ int main(void) {
        return 2;
    }
 
+   // Make sure our signal handler is still the one in use.
+   if (sigaction(SIGSEGV, NULL, &sa) < 0) {
+       perror("sigaction check");
+       return 2;
+   }
+   if (sa.sa_sigaction != handler) {
+       fprintf(stderr, "ERROR: wrong signal handler: %p != %p\n", sa.sa_sigaction, handler);
+       return 2;
+   }
+
    res = FromPkg();
    if (res != 1024) {
        fprintf(stderr, "ERROR: FromPkg()=%d, want 1024\n", res);

@@ -27,7 +27,7 @@ fi
 # Directory where cgo headers and outputs will be installed.
 # The installation directory format varies depending on the platform.
 installdir=pkg/${goos}_${goarch}_testcshared_shared
-if [ "${goos}/${goarch}" == "darwin/amd64" ]; then
+if [ "${goos}" == "darwin" ]; then
    installdir=pkg/${goos}_${goarch}_testcshared
 fi
 

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Copyright 2015 The Go Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style
 # license that can be found in the LICENSE file.

@@ -33,7 +33,7 @@ if [ "$pattern" = "" ]; then
 fi
 
 # put linux, nacl first in the target list to get all the architectures up front.
-targets="$((ls runtime | sed -n 's/^rt0_\(.*\)_\(.*\)\.s/\1-\2/p'; echo linux-386-387 linux-arm-arm5) | sort | egrep -v android-arm | egrep "$pattern" | egrep 'linux|nacl')
+targets="$((ls runtime | sed -n 's/^rt0_\(.*\)_\(.*\)\.s/\1-\2/p'; echo linux-386-387 linux-arm-arm5) | sort | sed -e 's|linux-mips64x|linux-mips64 linux-mips64le|' | egrep -v android-arm | egrep "$pattern" | egrep 'linux|nacl')
 $(ls runtime | sed -n 's/^rt0_\(.*\)_\(.*\)\.s/\1-\2/p' | egrep -v 'android-arm|darwin-arm' | egrep "$pattern" | egrep -v 'linux|nacl')"
 
 ./make.bash || exit 1

@@ -125,11 +125,19 @@ The C types __int128_t and __uint128_t are represented by [16]byte.
 To access a struct, union, or enum type directly, prefix it with
 struct_, union_, or enum_, as in C.struct_stat.
 
+The size of any C type T is available as C.sizeof_T, as in
+C.sizeof_struct_stat.
+
 As Go doesn't have support for C's union type in the general case,
 C's union types are represented as a Go byte array with the same length.
 
 Go structs cannot embed fields with C types.
 
+Go code can not refer to zero-sized fields that occur at the end of
+non-empty C structs. To get the address of such a field (which is the
+only operation you can do with a zero-sized field) you must take the
+address of the struct and add the size of the struct.
+
 Cgo translates C types into equivalent unexported Go types.
 Because the translations are unexported, a Go package should not
 expose C types in its exported API: a C type used in one Go package
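
A short cgo sketch of the two rules documented above (not from the commit; the struct is hypothetical): C.sizeof_T for sizes, and pointer arithmetic past the struct for a trailing zero-sized field.

    package main

    /*
    struct tailed {
        int n;
        char tail[0]; // zero-sized trailing field (a C extension)
    };
    */
    import "C"

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        var s C.struct_tailed
        // The size of any C type T is available as C.sizeof_T.
        fmt.Println("sizeof struct tailed:", C.sizeof_struct_tailed)
        // Go code cannot name s.tail; per the rule above, compute its address
        // as the struct's address plus the struct's size.
        tail := unsafe.Pointer(uintptr(unsafe.Pointer(&s)) + uintptr(C.sizeof_struct_tailed))
        _ = tail
    }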

@@ -626,9 +626,7 @@ func (p *Package) rewriteCall(f *File, call *ast.CallExpr, name *Name) {
 
        // Add optional additional arguments for an address
        // expression.
-       if u, ok := call.Args[i].(*ast.UnaryExpr); ok && u.Op == token.AND {
-           c.Args = p.checkAddrArgs(f, c.Args, u.X)
-       }
+       c.Args = p.checkAddrArgs(f, c.Args, call.Args[i])
 
        // _cgoCheckPointer returns interface{}.
        // We need to type assert that to the type we want.
@@ -773,7 +771,19 @@ func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool {
 // only pass the slice or array if we can refer to it without side
 // effects.
 func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr {
-   index, ok := x.(*ast.IndexExpr)
+   // Strip type conversions.
+   for {
+       c, ok := x.(*ast.CallExpr)
+       if !ok || len(c.Args) != 1 || !p.isType(c.Fun) {
+           break
+       }
+       x = c.Args[0]
+   }
+   u, ok := x.(*ast.UnaryExpr)
+   if !ok || u.Op != token.AND {
+       return args
+   }
+   index, ok := u.X.(*ast.IndexExpr)
    if !ok {
        // This is the address of something that is not an
        // index expression. We only need to examine the
@@ -804,6 +814,42 @@ func (p *Package) hasSideEffects(f *File, x ast.Expr) bool {
    return found
 }
 
+// isType returns whether the expression is definitely a type.
+// This is conservative--it returns false for an unknown identifier.
+func (p *Package) isType(t ast.Expr) bool {
+   switch t := t.(type) {
+   case *ast.SelectorExpr:
+       if t.Sel.Name != "Pointer" {
+           return false
+       }
+       id, ok := t.X.(*ast.Ident)
+       if !ok {
+           return false
+       }
+       return id.Name == "unsafe"
+   case *ast.Ident:
+       // TODO: This ignores shadowing.
+       switch t.Name {
+       case "unsafe.Pointer", "bool", "byte",
+           "complex64", "complex128",
+           "error",
+           "float32", "float64",
+           "int", "int8", "int16", "int32", "int64",
+           "rune", "string",
+           "uint", "uint8", "uint16", "uint32", "uint64", "uintptr":
+
+           return true
+       }
+   case *ast.StarExpr:
+       return p.isType(t.X)
+   case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType,
+       *ast.MapType, *ast.ChanType:
+
+       return true
+   }
+   return false
+}
+
 // unsafeCheckPointerName is given the Go version of a C type. If the
 // type uses unsafe.Pointer, we arrange to build a version of
 // _cgoCheckPointer that returns that type. This avoids using a type
@@ -832,6 +878,8 @@ func (p *Package) unsafeCheckPointerName(t ast.Expr) string {
 func (p *Package) hasUnsafePointer(t ast.Expr) bool {
    switch t := t.(type) {
    case *ast.Ident:
+       // We don't see a SelectorExpr for unsafe.Pointer;
+       // this is created by code in this file.
        return t.Name == "unsafe.Pointer"
    case *ast.ArrayType:
        return p.hasUnsafePointer(t.Elt)
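
The user-visible effect of the checkAddrArgs/isType change above is that the cgo pointer check now sees through an explicit conversion wrapped around an address argument. A minimal sketch mirroring the new slice-ok-3 test earlier in this commit:

    package main

    // void f(void* p) {}
    import "C"

    import "unsafe"

    type S struct {
        p *int
        a [4]byte
    }

    func main() {
        i := 0
        s := &S{p: &i}
        b := s.a[:]
        // The address is passed through unsafe.Pointer(...); the check strips
        // the conversion, sees &b[0], and accepts it because the byte array
        // contains no Go pointers.
        C.f(unsafe.Pointer(&b[0]))
    }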

@@ -34,6 +34,7 @@ var progtable = [x86.ALAST]obj.ProgInfo{
    obj.ACHECKNIL: {Flags: gc.LeftRead},
    obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
    obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
+   obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
 
    // NOP is an internal no-op that also stands
    // for USED and SET annotations, not the Intel opcode.

@@ -1366,6 +1366,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
        obj.AFUNCDATA,
        obj.AVARDEF,
        obj.AVARKILL,
+       obj.AVARLIVE,
        obj.AUSEFIELD:
        return 0
    }

@@ -33,6 +33,7 @@ var progtable = [arm.ALAST]obj.ProgInfo{
    obj.ACHECKNIL: {Flags: gc.LeftRead},
    obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
    obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
+   obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
 
    // NOP is an internal no-op that also stands
    // for USED and SET annotations, not the Intel opcode.

@@ -711,6 +711,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
        obj.AFUNCDATA,
        obj.AVARDEF,
        obj.AVARKILL,
+       obj.AVARLIVE,
        obj.AUSEFIELD:
        return 0
    }

@@ -34,6 +34,7 @@ var progtable = [arm64.ALAST]obj.ProgInfo{
    obj.ACHECKNIL: {Flags: gc.LeftRead},
    obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
    obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
+   obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
 
    // NOP is an internal no-op that also stands
    // for USED and SET annotations, not the Power opcode.

@@ -634,6 +634,7 @@ func evconst(n *Node) {
    var wr int
    var v Val
    var norig *Node
+   var nn *Node
    if nr == nil {
        // copy numeric value to avoid modifying
        // nl, in case someone still refers to it (e.g. iota).
@@ -1115,15 +1116,21 @@ ret:
    return
 
 settrue:
-   norig = saveorig(n)
-   *n = *Nodbool(true)
-   n.Orig = norig
+   nn = Nodbool(true)
+   nn.Orig = saveorig(n)
+   if !iscmp[n.Op] {
+       nn.Type = nl.Type
+   }
+   *n = *nn
    return
 
 setfalse:
-   norig = saveorig(n)
-   *n = *Nodbool(false)
-   n.Orig = norig
+   nn = Nodbool(false)
+   nn.Orig = saveorig(n)
+   if !iscmp[n.Op] {
+       nn.Type = nl.Type
+   }
+   *n = *nn
    return
 
 illegal:

@@ -299,12 +299,13 @@ func (l Level) guaranteedDereference() int {
 }
 
 type NodeEscState struct {
    Curfn *Node
    Escflowsrc *NodeList // flow(this, src)
    Escretval *NodeList // on OCALLxxx, list of dummy return values
    Escloopdepth int32 // -1: global, 0: return variables, 1:function top level, increased inside function for every loop or label to mark scopes
    Esclevel Level
    Walkgen uint32
+   Maxextraloopdepth int32
 }
 
 func (e *EscState) nodeEscState(n *Node) *NodeEscState {
@@ -1579,7 +1580,13 @@ func funcOutputAndInput(dst, src *Node) bool {
        src.Op == ONAME && src.Class == PPARAM && src.Name.Curfn == dst.Name.Curfn
 }
 
+const NOTALOOPDEPTH = -1
+
 func escwalk(e *EscState, level Level, dst *Node, src *Node) {
+   escwalkBody(e, level, dst, src, NOTALOOPDEPTH)
+}
+
+func escwalkBody(e *EscState, level Level, dst *Node, src *Node, extraloopdepth int32) {
    if src.Op == OLITERAL {
        return
    }
@@ -1590,16 +1597,29 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
        // convergence.
        level = level.min(srcE.Esclevel)
        if level == srcE.Esclevel {
-           return
+           // Have we been here already with an extraloopdepth,
+           // or is the extraloopdepth provided no improvement on
+           // what's already been seen?
+           if srcE.Maxextraloopdepth >= extraloopdepth || srcE.Escloopdepth >= extraloopdepth {
+               return
+           }
+           srcE.Maxextraloopdepth = extraloopdepth
        }
+   } else { // srcE.Walkgen < e.walkgen -- first time, reset this.
+       srcE.Maxextraloopdepth = NOTALOOPDEPTH
    }
 
    srcE.Walkgen = e.walkgen
    srcE.Esclevel = level
+   modSrcLoopdepth := srcE.Escloopdepth
+
+   if extraloopdepth > modSrcLoopdepth {
+       modSrcLoopdepth = extraloopdepth
+   }
 
    if Debug['m'] > 1 {
-       fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %v(%v) scope:%v[%d]\n",
-           level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", Oconv(int(src.Op), 0), Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), e.curfnSym(src), srcE.Escloopdepth)
+       fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %v(%v) scope:%v[%d] extraloopdepth=%v\n",
+           level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", Oconv(int(src.Op), 0), Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), e.curfnSym(src), srcE.Escloopdepth, extraloopdepth)
    }
 
    e.pdepth++
@@ -1638,7 +1658,7 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
        }
    }
 
-   leaks = level.int() <= 0 && level.guaranteedDereference() <= 0 && dstE.Escloopdepth < srcE.Escloopdepth
+   leaks = level.int() <= 0 && level.guaranteedDereference() <= 0 && dstE.Escloopdepth < modSrcLoopdepth
 
    switch src.Op {
    case ONAME:
@@ -1650,7 +1670,7 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
                Warnl(int(src.Lineno), "leaking param content: %v", Nconv(src, obj.FmtShort))
            } else {
                Warnl(int(src.Lineno), "leaking param content: %v level=%v dst.eld=%v src.eld=%v dst=%v",
-                   Nconv(src, obj.FmtShort), level, dstE.Escloopdepth, srcE.Escloopdepth, Nconv(dst, obj.FmtShort))
+                   Nconv(src, obj.FmtShort), level, dstE.Escloopdepth, modSrcLoopdepth, Nconv(dst, obj.FmtShort))
            }
        }
    } else {
@@ -1660,7 +1680,7 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
                Warnl(int(src.Lineno), "leaking param: %v", Nconv(src, obj.FmtShort))
            } else {
                Warnl(int(src.Lineno), "leaking param: %v level=%v dst.eld=%v src.eld=%v dst=%v",
-                   Nconv(src, obj.FmtShort), level, dstE.Escloopdepth, srcE.Escloopdepth, Nconv(dst, obj.FmtShort))
+                   Nconv(src, obj.FmtShort), level, dstE.Escloopdepth, modSrcLoopdepth, Nconv(dst, obj.FmtShort))
            }
        }
    }
@@ -1686,15 +1706,17 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
            }
            if Debug['m'] > 1 {
                Warnl(int(src.Lineno), "%v escapes to heap, level=%v, dst.eld=%v, src.eld=%v",
-                   Nconv(p, obj.FmtShort), level, dstE.Escloopdepth, srcE.Escloopdepth)
+                   Nconv(p, obj.FmtShort), level, dstE.Escloopdepth, modSrcLoopdepth)
            } else {
                Warnl(int(src.Lineno), "%v escapes to heap", Nconv(p, obj.FmtShort))
            }
        }
+       escwalkBody(e, level.dec(), dst, src.Left, modSrcLoopdepth)
+       extraloopdepth = modSrcLoopdepth // passes to recursive case, seems likely a no-op
+   } else {
+       escwalk(e, level.dec(), dst, src.Left)
    }
 
-   escwalk(e, level.dec(), dst, src.Left)
-
 case OAPPEND:
    escwalk(e, level, dst, src.List.N)
@@ -1704,6 +1726,7 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
        if Debug['m'] != 0 {
            Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
        }
+       extraloopdepth = modSrcLoopdepth
    }
    // similar to a slice arraylit and its args.
    level = level.dec()
@@ -1737,6 +1760,7 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
        if Debug['m'] != 0 {
            Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
        }
+       extraloopdepth = modSrcLoopdepth
    }
 
 case ODOT,
@@ -1778,12 +1802,19 @@ func escwalk(e *EscState, level Level, dst *Node, src *Node) {
 recurse:
    level = level.copy()
    for ll := srcE.Escflowsrc; ll != nil; ll = ll.Next {
-       escwalk(e, level, dst, ll.N)
+       escwalkBody(e, level, dst, ll.N, extraloopdepth)
    }
 
    e.pdepth--
 }
 
+// This special tag is applied to uintptr variables
+// that we believe may hold unsafe.Pointers for
+// calls into assembly functions.
+// It is logically a constant, but using a var
+// lets us take the address below to get a *string.
+var unsafeUintptrTag = "unsafe-uintptr"
+
 func esctag(e *EscState, func_ *Node) {
    func_.Esc = EscFuncTagged
 
@@ -1798,6 +1829,29 @@ func esctag(e *EscState, func_ *Node) {
        }
    }
 
+   // Assume that uintptr arguments must be held live across the call.
+   // This is most important for syscall.Syscall.
+   // See golang.org/issue/13372.
+   // This really doesn't have much to do with escape analysis per se,
+   // but we are reusing the ability to annotate an individual function
+   // argument and pass those annotations along to importing code.
+   narg := 0
+   for t := getinargx(func_.Type).Type; t != nil; t = t.Down {
+       narg++
+       if t.Type.Etype == TUINTPTR {
+           if Debug['m'] != 0 {
+               var name string
+               if t.Sym != nil {
+                   name = t.Sym.Name
+               } else {
+                   name = fmt.Sprintf("arg#%d", narg)
+               }
+               Warnl(int(func_.Lineno), "%v assuming %v is unsafe uintptr", funcSym(func_), name)
+           }
+           t.Note = &unsafeUintptrTag
+       }
+   }
+
    return
 }
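
The unsafe-uintptr tag added above exists to make the common syscall idiom safe: a pointer converted to uintptr in the call expression itself is now kept alive until the call returns. A Linux-only sketch of that idiom (not part of the commit):

    package main

    import (
        "syscall"
        "unsafe"
    )

    func main() {
        msg := []byte("kept alive across the syscall\n")
        // The conversion appears directly in the argument list, so the compiler
        // (via the unsafe-uintptr annotation on syscall.Syscall's parameters)
        // keeps msg's backing array live until the call returns.
        // See golang.org/issue/13372.
        syscall.Syscall(syscall.SYS_WRITE, uintptr(syscall.Stdout),
            uintptr(unsafe.Pointer(&msg[0])), uintptr(len(msg)))
    }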

src/cmd/compile/internal/gc/float_test.go | 102 (new file)

@@ -0,0 +1,102 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import "testing"
+
+// For GO386=387, make sure fucomi* opcodes are not used
+// for comparison operations.
+// Note that this test will fail only on a Pentium MMX
+// processor (with GOARCH=386 GO386=387), as it just runs
+// some code and looks for an unimplemented instruction fault.
+
+//go:noinline
+func compare1(a, b float64) bool {
+   return a < b
+}
+
+//go:noinline
+func compare2(a, b float32) bool {
+   return a < b
+}
+
+func TestFloatCompare(t *testing.T) {
+   if !compare1(3, 5) {
+       t.Errorf("compare1 returned false")
+   }
+   if !compare2(3, 5) {
+       t.Errorf("compare2 returned false")
+   }
+}
+
+// For GO386=387, make sure fucomi* opcodes are not used
+// for float->int conversions.
+
+//go:noinline
+func cvt1(a float64) uint64 {
+   return uint64(a)
+}
+
+//go:noinline
+func cvt2(a float64) uint32 {
+   return uint32(a)
+}
+
+//go:noinline
+func cvt3(a float32) uint64 {
+   return uint64(a)
+}
+
+//go:noinline
+func cvt4(a float32) uint32 {
+   return uint32(a)
+}
+
+//go:noinline
+func cvt5(a float64) int64 {
+   return int64(a)
+}
+
+//go:noinline
+func cvt6(a float64) int32 {
+   return int32(a)
+}
+
+//go:noinline
+func cvt7(a float32) int64 {
+   return int64(a)
+}
+
+//go:noinline
+func cvt8(a float32) int32 {
+   return int32(a)
+}
+
+func TestFloatConvert(t *testing.T) {
+   if got := cvt1(3.5); got != 3 {
+       t.Errorf("cvt1 got %d, wanted 3", got)
+   }
+   if got := cvt2(3.5); got != 3 {
+       t.Errorf("cvt2 got %d, wanted 3", got)
+   }
+   if got := cvt3(3.5); got != 3 {
+       t.Errorf("cvt3 got %d, wanted 3", got)
+   }
+   if got := cvt4(3.5); got != 3 {
+       t.Errorf("cvt4 got %d, wanted 3", got)
+   }
+   if got := cvt5(3.5); got != 3 {
+       t.Errorf("cvt5 got %d, wanted 3", got)
+   }
+   if got := cvt6(3.5); got != 3 {
+       t.Errorf("cvt6 got %d, wanted 3", got)
+   }
+   if got := cvt7(3.5); got != 3 {
+       t.Errorf("cvt7 got %d, wanted 3", got)
+   }
+   if got := cvt8(3.5); got != 3 {
+       t.Errorf("cvt8 got %d, wanted 3", got)
+   }
+}

@@ -607,6 +607,9 @@ func Tempname(nn *Node, t *Type) {
    n.Esc = EscNever
    n.Name.Curfn = Curfn
    Curfn.Func.Dcl = list(Curfn.Func.Dcl, n)
+   if Debug['h'] != 0 {
+       println("H", n, n.Orig, funcSym(Curfn).Name)
+   }
 
    dowidth(t)
    n.Xoffset = 0
@@ -870,6 +873,9 @@ func gen(n *Node) {
 
    case OVARKILL:
        gvarkill(n.Left)
+
+   case OVARLIVE:
+       gvarlive(n.Left)
    }
 
 ret:

@@ -185,7 +185,7 @@ func fixautoused(p *obj.Prog) {
            continue
        }
 
-       if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && !((p.To.Node).(*Node)).Used {
+       if (p.As == obj.AVARDEF || p.As == obj.AVARKILL || p.As == obj.AVARLIVE) && p.To.Node != nil && !((p.To.Node).(*Node)).Used {
            // Cannot remove VARDEF instruction, because - unlike TYPE handled above -
            // VARDEFs are interspersed with other code, and a jump might be using the
            // VARDEF as a target. Replace with a no-op instead. A later pass will remove

@@ -694,7 +694,13 @@ func importfile(f *Val, line int) {
        errorexit()
    }
 
-   if f.U.(string) == "unsafe" {
+   path_ := f.U.(string)
+
+   if mapped, ok := importMap[path_]; ok {
+       path_ = mapped
+   }
+
+   if path_ == "unsafe" {
        if safemode != 0 {
            Yyerror("cannot import package unsafe")
            errorexit()
@@ -706,12 +712,6 @@ func importfile(f *Val, line int) {
        return
    }
 
-   path_ := f.U.(string)
-
-   if mapped, ok := importMap[path_]; ok {
-       path_ = mapped
-   }
-
    if islocalname(path_) {
        if path_[0] == '/' {
            Yyerror("import path cannot be absolute path")
@@ -243,6 +243,13 @@ func cleantempnopop(mark *NodeList, order *Order, out **NodeList) {
 	var kill *Node
 
 	for l := order.temp; l != mark; l = l.Next {
+		if l.N.Name.Keepalive {
+			l.N.Name.Keepalive = false
+			l.N.Addrtaken = true // ensure SSA keeps the l.N variable
+			kill = Nod(OVARLIVE, l.N, nil)
+			typecheck(&kill, Etop)
+			*out = list(*out, kill)
+		}
 		kill = Nod(OVARKILL, l.N, nil)
 		typecheck(&kill, Etop)
 		*out = list(*out, kill)

@@ -375,6 +382,28 @@ func ordercall(n *Node, order *Order) {
 	orderexpr(&n.Left, order, nil)
 	orderexpr(&n.Right, order, nil) // ODDDARG temp
 	ordercallargs(&n.List, order)
+
+	if n.Op == OCALLFUNC {
+		for l, t := n.List, getinargx(n.Left.Type).Type; l != nil && t != nil; l, t = l.Next, t.Down {
+			// Check for "unsafe-uintptr" tag provided by escape analysis.
+			// If present and the argument is really a pointer being converted
+			// to uintptr, arrange for the pointer to be kept alive until the call
+			// returns, by copying it into a temp and marking that temp
+			// still alive when we pop the temp stack.
+			if t.Note != nil && *t.Note == unsafeUintptrTag {
+				xp := &l.N
+				for (*xp).Op == OCONVNOP && !Isptr[(*xp).Type.Etype] {
+					xp = &(*xp).Left
+				}
+				x := *xp
+				if Isptr[x.Type.Etype] {
+					x = ordercopyexpr(x, x.Type, order, 0)
+					x.Name.Keepalive = true
+					*xp = x
+				}
+			}
+		}
+	}
 }
 
 // Ordermapassign appends n to order->out, introducing temporaries
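For context, the ordercall hunk above is the compiler-side half of the "unsafe-uintptr" contract: the argument copied into a Keepalive temp stays live until the call returns. A rough user-level illustration of the pattern it protects follows; this is a sketch, not part of the commit, and the syscall number and helper name are hypothetical.

package sketch

import (
	"syscall"
	"unsafe"
)

// utimesSketch passes pointers to an assembly-implemented call as uintptrs.
// Escape analysis tags these parameters "unsafe-uintptr"; the ordercall code
// above then copies the pointed-to values into temps marked Keepalive, so
// they stay live until Syscall returns even though only uintptrs are passed.
func utimesSketch(path *byte, tv *[2]syscall.Timeval) error {
	_, _, errno := syscall.Syscall(
		syscall.SYS_UTIMES, // illustrative syscall choice
		uintptr(unsafe.Pointer(path)),
		uintptr(unsafe.Pointer(tv)),
		0,
	)
	if errno != 0 {
		return errno
	}
	return nil
}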
@@ -464,7 +493,7 @@ func orderstmt(n *Node, order *Order) {
 	default:
 		Fatalf("orderstmt %v", Oconv(int(n.Op), 0))
 
-	case OVARKILL:
+	case OVARKILL, OVARLIVE:
 		order.out = list(order.out, n)
 
 	case OAS:
@@ -95,7 +95,11 @@ func gvardefx(n *Node, as int) {
 
 	switch n.Class {
 	case PAUTO, PPARAM, PPARAMOUT:
-		Thearch.Gins(as, nil, n)
+		if as == obj.AVARLIVE {
+			Thearch.Gins(as, n, nil)
+		} else {
+			Thearch.Gins(as, nil, n)
+		}
 	}
 }

@@ -107,13 +111,17 @@ func gvarkill(n *Node) {
 	gvardefx(n, obj.AVARKILL)
 }
 
+func gvarlive(n *Node) {
+	gvardefx(n, obj.AVARLIVE)
+}
+
 func removevardef(firstp *obj.Prog) {
 	for p := firstp; p != nil; p = p.Link {
-		for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL) {
+		for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL || p.Link.As == obj.AVARLIVE) {
 			p.Link = p.Link.Link
 		}
 		if p.To.Type == obj.TYPE_BRANCH {
-			for p.To.Val.(*obj.Prog) != nil && (p.To.Val.(*obj.Prog).As == obj.AVARDEF || p.To.Val.(*obj.Prog).As == obj.AVARKILL) {
+			for p.To.Val.(*obj.Prog) != nil && (p.To.Val.(*obj.Prog).As == obj.AVARDEF || p.To.Val.(*obj.Prog).As == obj.AVARKILL || p.To.Val.(*obj.Prog).As == obj.AVARLIVE) {
 				p.To.Val = p.To.Val.(*obj.Prog).Link
 			}
 		}

@@ -809,7 +809,7 @@ func checkauto(fn *Node, p *obj.Prog, n *Node) {
 		return
 	}
 
-	fmt.Printf("checkauto %v: %v (%p; class=%d) not found in %v\n", Curfn, n, n, n.Class, p)
+	fmt.Printf("checkauto %v: %v (%p; class=%d) not found in %p %v\n", funcSym(Curfn), n, n, n.Class, p, p)
 	for l := fn.Func.Dcl; l != nil; l = l.Next {
 		fmt.Printf("\t%v (%p; class=%d)\n", l.N, l.N, l.N.Class)
 	}

@@ -143,7 +143,7 @@ func instrumentnode(np **Node, init **NodeList, wr int, skip int) {
 		goto ret
 
 	// can't matter
-	case OCFUNC, OVARKILL:
+	case OCFUNC, OVARKILL, OVARLIVE:
 		goto ret
 
 	case OBLOCK:
@@ -1073,6 +1073,9 @@ func regopt(firstp *obj.Prog) {
 
 	for f := firstf; f != nil; f = f.Link {
 		p := f.Prog
+		// AVARLIVE must be considered a use, do not skip it.
+		// Otherwise the variable will be optimized away,
+		// and the whole point of AVARLIVE is to keep it on the stack.
 		if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
 			continue
 		}

@@ -849,6 +849,13 @@ func (s *state) stmt(n *Node) {
 			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, ssa.TypeMem, n.Left, s.mem())
 		}
 
+	case OVARLIVE:
+		// Insert a varlive op to record that a variable is still live.
+		if !n.Left.Addrtaken {
+			s.Fatalf("VARLIVE variable %s must have Addrtaken set", n.Left)
+		}
+		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, ssa.TypeMem, n.Left, s.mem())
+
 	case OCHECKNIL:
 		p := s.expr(n.Left)
 		s.nilCheck(p)
@@ -4122,6 +4129,8 @@ func (s *genState) genValue(v *ssa.Value) {
 		Gvardef(v.Aux.(*Node))
 	case ssa.OpVarKill:
 		gvarkill(v.Aux.(*Node))
+	case ssa.OpVarLive:
+		gvarlive(v.Aux.(*Node))
 	case ssa.OpAMD64LoweredNilCheck:
 		// Optimization - if the subsequent block has a load or store
 		// at the same address, we don't need to issue this instruction.

@@ -128,6 +128,7 @@ type Name struct {
 	Captured  bool // is the variable captured by a closure
 	Byval     bool // is the variable captured by value or by reference
 	Needzero  bool // if it contains pointers, needs to be zeroed on function entry
+	Keepalive bool // mark value live across unknown assembly call
 }
 
 type Param struct {
@@ -342,6 +343,7 @@ const (
 	OCFUNC    // reference to c function pointer (not go func value)
 	OCHECKNIL // emit code to ensure pointer/interface not nil
 	OVARKILL  // variable is dead
+	OVARLIVE  // variable is alive
 
 	// thearch-specific registers
 	OREGISTER // a register, such as AX.

@@ -687,8 +687,6 @@ OpSwitch:
 			n.Left = l
 			n.Right = r
 		}
-	} else if n.Op == OANDAND || n.Op == OOROR {
-		evconst(n)
 	}
 
 	if et == TSTRING {

@@ -2025,7 +2023,8 @@ OpSwitch:
 		OEMPTY,
 		OGOTO,
 		OXFALL,
-		OVARKILL:
+		OVARKILL,
+		OVARLIVE:
 		ok |= Etop
 		break OpSwitch
@@ -216,7 +216,8 @@ func walkstmt(np **Node) {
 		ODCLCONST,
 		ODCLTYPE,
 		OCHECKNIL,
-		OVARKILL:
+		OVARKILL,
+		OVARLIVE:
 		break
 
 	case OBLOCK:

@@ -688,6 +688,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
 		obj.AFUNCDATA,
 		obj.AVARDEF,
 		obj.AVARKILL,
+		obj.AVARLIVE,
 		obj.AUSEFIELD:
 		return 0
 	}
@@ -34,6 +34,7 @@ var progtable = [mips.ALAST]obj.ProgInfo{
 	obj.ACHECKNIL: {Flags: gc.LeftRead},
 	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
 	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
+	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
 
 	// NOP is an internal no-op that also stands
 	// for USED and SET annotations, not the MIPS opcode.

@@ -953,6 +953,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
 		obj.AFUNCDATA,
 		obj.AVARDEF,
 		obj.AVARKILL,
+		obj.AVARLIVE,
 		obj.AUSEFIELD:
 		return 0
 	}

@@ -34,6 +34,7 @@ var progtable = [ppc64.ALAST]obj.ProgInfo{
 	obj.ACHECKNIL: {Flags: gc.LeftRead},
 	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
 	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
+	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
 
 	// NOP is an internal no-op that also stands
 	// for USED and SET annotations, not the Power opcode.
@@ -373,6 +373,7 @@ var genericOps = []opData{
 
 	{name: "VarDef", typ: "Mem"}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem
 	{name: "VarKill"},            // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem
+	{name: "VarLive"},            // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
 }
 
 // kind control successors implicit exit

@@ -21,7 +21,7 @@ func checkLower(f *Func) {
 			continue // lowered
 		}
 		switch v.Op {
-		case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill:
+		case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive:
 			continue // ok not to lower
 		}
 		s := "not lowered: " + v.Op.String() + " " + v.Type.SimpleString()

@@ -552,6 +552,7 @@ const (
 	OpFwdRef
 	OpVarDef
 	OpVarKill
+	OpVarLive
 )
 
 var opcodeTable = [...]opInfo{

@@ -4310,6 +4311,10 @@ var opcodeTable = [...]opInfo{
 		name:    "VarKill",
 		generic: true,
 	},
+	{
+		name:    "VarLive",
+		generic: true,
+	},
 }
 
 func (o Op) Asm() int { return opcodeTable[o].asm }
@@ -764,9 +764,7 @@ func bgen_float(n *gc.Node, wantTrue bool, likely int, to *obj.Prog) {
 			gc.Cgen(nr, &tmp)
 			gc.Cgen(nl, &tmp)
 		}
 
-		gins(x86.AFUCOMIP, &tmp, &n2)
-		gins(x86.AFMOVDP, &tmp, &tmp) // annoying pop but still better than STSW+SAHF
+		gins(x86.AFUCOMPP, &tmp, &n2)
 	} else {
 		// TODO(rsc): The moves back and forth to memory
 		// here are for truncating the value to 32 bits.

@@ -783,9 +781,9 @@ func bgen_float(n *gc.Node, wantTrue bool, likely int, to *obj.Prog) {
 			gc.Cgen(nl, &t2)
 			gmove(&t2, &tmp)
 			gins(x86.AFCOMFP, &t1, &tmp)
-			gins(x86.AFSTSW, nil, &ax)
-			gins(x86.ASAHF, nil, nil)
 		}
+		gins(x86.AFSTSW, nil, &ax)
+		gins(x86.ASAHF, nil, nil)
 	} else {
 		// Not 387
 		if !nl.Addable {
@@ -1198,14 +1198,17 @@ func floatmove(f *gc.Node, t *gc.Node) {
 
 		// if 0 > v { answer = 0 }
 		gins(x86.AFMOVD, &zerof, &f0)
-		gins(x86.AFUCOMIP, &f0, &f1)
+		gins(x86.AFUCOMP, &f0, &f1)
+		gins(x86.AFSTSW, nil, &ax)
+		gins(x86.ASAHF, nil, nil)
 		p1 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
 
 		// if 1<<64 <= v { answer = 0 too }
 		gins(x86.AFMOVD, &two64f, &f0)
 
-		gins(x86.AFUCOMIP, &f0, &f1)
+		gins(x86.AFUCOMP, &f0, &f1)
+		gins(x86.AFSTSW, nil, &ax)
+		gins(x86.ASAHF, nil, nil)
 		p2 := gc.Gbranch(optoas(gc.OGT, gc.Types[tt]), nil, 0)
 		gc.Patch(p1, gc.Pc)
 		gins(x86.AFMOVVP, &f0, t) // don't care about t, but will pop the stack

@@ -1235,7 +1238,9 @@ func floatmove(f *gc.Node, t *gc.Node) {
 		// actual work
 		gins(x86.AFMOVD, &two63f, &f0)
 
-		gins(x86.AFUCOMIP, &f0, &f1)
+		gins(x86.AFUCOMP, &f0, &f1)
+		gins(x86.AFSTSW, nil, &ax)
+		gins(x86.ASAHF, nil, nil)
 		p2 = gc.Gbranch(optoas(gc.OLE, gc.Types[tt]), nil, 0)
 		gins(x86.AFMOVVP, &f0, t)
 		p3 := gc.Gbranch(obj.AJMP, nil, 0)
@@ -40,6 +40,7 @@ var progtable = [x86.ALAST]obj.ProgInfo{
 	obj.ACHECKNIL: {Flags: gc.LeftRead},
 	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
 	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
+	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
 
 	// NOP is an internal no-op that also stands
 	// for USED and SET annotations, not the Intel opcode.

@@ -91,8 +92,12 @@ var progtable = [x86.ALAST]obj.ProgInfo{
 	x86.AFCOMDPP: {Flags: gc.SizeD | gc.LeftAddr | gc.RightRead},
 	x86.AFCOMF:   {Flags: gc.SizeF | gc.LeftAddr | gc.RightRead},
 	x86.AFCOMFP:  {Flags: gc.SizeF | gc.LeftAddr | gc.RightRead},
-	x86.AFUCOMIP: {Flags: gc.SizeF | gc.LeftAddr | gc.RightRead},
-	x86.AFCHS:    {Flags: gc.SizeD | RightRdwr}, // also SizeF
+	// NOTE(khr): don't use FUCOMI* instructions, not available
+	// on Pentium MMX. See issue 13923.
+	//x86.AFUCOMIP: {Flags: gc.SizeF | gc.LeftAddr | gc.RightRead},
+	x86.AFUCOMP:  {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
+	x86.AFUCOMPP: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
+	x86.AFCHS:    {Flags: gc.SizeD | RightRdwr}, // also SizeF
 
 	x86.AFDIVDP: {Flags: gc.SizeD | gc.LeftAddr | RightRdwr},
 	x86.AFDIVF:  {Flags: gc.SizeF | gc.LeftAddr | RightRdwr},
src/cmd/dist/test.go

@@ -656,7 +656,7 @@ func (t *tester) supportedBuildmode(mode string) bool {
 	case "c-shared":
 		switch pair {
 		case "linux-386", "linux-amd64", "linux-arm", "linux-arm64",
-			"darwin-amd64",
+			"darwin-amd64", "darwin-386",
 			"android-arm", "android-arm64", "android-386":
 			return true
 		}

@@ -913,6 +913,12 @@ func (t *tester) cgoTestSO(dt *distTest, testpath string) error {
 			s = "DYLD_LIBRARY_PATH"
 		}
 		cmd.Env = mergeEnvLists([]string{s + "=."}, os.Environ())
+
+		// On FreeBSD 64-bit architectures, the 32-bit linker looks for
+		// different environment variables.
+		if t.goos == "freebsd" && t.gohostarch == "386" {
+			cmd.Env = mergeEnvLists([]string{"LD_32_LIBRARY_PATH=."}, cmd.Env)
+		}
 	}
 	return cmd.Run()
 }
src/cmd/dist/util.go

@@ -461,7 +461,7 @@ func main() {
 		}
 	}
 
-	if gohostarch == "arm" {
+	if gohostarch == "arm" || gohostarch == "mips64" || gohostarch == "mips64le" {
 		maxbg = min(maxbg, runtime.NumCPU())
 	}
 	bginit()

@@ -354,7 +354,7 @@ func buildModeInit() {
 		case "linux/amd64", "linux/arm", "linux/arm64", "linux/386",
 			"android/amd64", "android/arm", "android/arm64", "android/386":
 			codegenArg = "-shared"
-		case "darwin/amd64":
+		case "darwin/amd64", "darwin/386":
 		default:
 			fatalf("-buildmode=c-shared not supported on %s\n", platform)
 		}
@@ -822,7 +822,9 @@ func goFilesPackage(gofiles []string) *Package {
 	pkg := new(Package)
 	pkg.local = true
 	pkg.cmdline = true
+	stk.push("main")
 	pkg.load(&stk, bp, err)
+	stk.pop()
 	pkg.localPrefix = dirToImportPath(dir)
 	pkg.ImportPath = "command-line-arguments"
 	pkg.target = ""

@@ -999,13 +1001,22 @@ func (b *builder) action1(mode buildMode, depMode buildMode, p *Package, looksha
 
 	// Install header for cgo in c-archive and c-shared modes.
 	if p.usesCgo() && (buildBuildmode == "c-archive" || buildBuildmode == "c-shared") {
+		hdrTarget := a.target[:len(a.target)-len(filepath.Ext(a.target))] + ".h"
+		if buildContext.Compiler == "gccgo" {
+			// For the header file, remove the "lib"
+			// added by go/build, so we generate pkg.h
+			// rather than libpkg.h.
+			dir, file := filepath.Split(hdrTarget)
+			file = strings.TrimPrefix(file, "lib")
+			hdrTarget = filepath.Join(dir, file)
+		}
 		ah := &action{
 			p:      a.p,
 			deps:   []*action{a.deps[0]},
 			f:      (*builder).installHeader,
 			pkgdir: a.pkgdir,
 			objdir: a.objdir,
-			target: a.target[:len(a.target)-len(filepath.Ext(a.target))] + ".h",
+			target: hdrTarget,
 		}
 		a.deps = append(a.deps, ah)
 	}

@@ -2711,6 +2722,10 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
 		// libffi.
 		ldflags = append(ldflags, "-Wl,-r", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive")
 
+		if b.gccSupportsNoPie() {
+			ldflags = append(ldflags, "-no-pie")
+		}
 
 		// We are creating an object file, so we don't want a build ID.
 		ldflags = b.disableBuildID(ldflags)

@@ -2718,7 +2733,7 @@ func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions
 		out = out + ".o"
 
 	case "c-shared":
-		ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc")
+		ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc")
 
 	default:
 		fatalf("-buildmode=%s not supported for gccgo", ldBuildmode)
@@ -2902,6 +2917,36 @@ func (b *builder) ccompilerCmd(envvar, defcmd, objdir string) []string {
 	return a
 }
 
+// On systems with PIE (position independent executables) enabled by default,
+// -no-pie must be passed when doing a partial link with -Wl,-r. But -no-pie is
+// not supported by all compilers.
+func (b *builder) gccSupportsNoPie() bool {
+	if goos != "linux" {
+		// On some BSD platforms, error messages from the
+		// compiler make it to the console despite cmd.Std*
+		// all being nil. As -no-pie is only required on linux
+		// systems so far, we only test there.
+		return false
+	}
+	src := filepath.Join(b.work, "trivial.c")
+	if err := ioutil.WriteFile(src, []byte{}, 0666); err != nil {
+		return false
+	}
+	cmdArgs := b.gccCmd(b.work)
+	cmdArgs = append(cmdArgs, "-no-pie", "-c", "trivial.c")
+	if buildN || buildX {
+		b.showcmd(b.work, "%s", joinUnambiguously(cmdArgs))
+		if buildN {
+			return false
+		}
+	}
+	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
+	cmd.Dir = b.work
+	cmd.Env = envForDir(cmd.Dir, os.Environ())
+	out, err := cmd.CombinedOutput()
+	return err == nil && !bytes.Contains(out, []byte("unrecognized"))
+}
+
 // gccArchArgs returns arguments to pass to gcc based on the architecture.
 func (b *builder) gccArchArgs() []string {
 	switch goarch {

@@ -3158,6 +3203,10 @@ func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofi
 	}
 	ldflags := stringList(bareLDFLAGS, "-Wl,-r", "-nostdlib", staticLibs)
+
+	if b.gccSupportsNoPie() {
+		ldflags = append(ldflags, "-no-pie")
+	}
 
 	// We are creating an object file, so we don't want a build ID.
 	ldflags = b.disableBuildID(ldflags)
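The gccSupportsNoPie probe above uses a general technique: feature-detect a compiler or linker flag by compiling an empty translation unit and checking both the exit status and the diagnostics. A standalone sketch of the same idea, outside cmd/go's builder plumbing (helper names and the "unrecognized" heuristic are assumptions, not part of this commit):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
)

// compilerSupportsFlag reports whether cc accepts flag when compiling an
// empty C file. A non-zero exit or an "unrecognized" diagnostic is treated
// as "unsupported", mirroring the heuristic in the diff above.
func compilerSupportsFlag(cc, flag string) bool {
	dir, err := ioutil.TempDir("", "flagprobe")
	if err != nil {
		return false
	}
	defer os.RemoveAll(dir)

	src := filepath.Join(dir, "trivial.c")
	if err := ioutil.WriteFile(src, []byte{}, 0666); err != nil {
		return false
	}
	cmd := exec.Command(cc, flag, "-c", "trivial.c")
	cmd.Dir = dir
	out, err := cmd.CombinedOutput()
	return err == nil && !bytes.Contains(out, []byte("unrecognized"))
}

func main() {
	fmt.Println(compilerSupportsFlag("gcc", "-no-pie"))
}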
@@ -961,6 +961,16 @@ func TestInternalPackagesOutsideGOROOTAreRespected(t *testing.T) {
 	tg.grepBoth("use of internal package not allowed", "wrote error message for testdata/testinternal2")
 }
 
+func TestRunInternal(t *testing.T) {
+	tg := testgo(t)
+	defer tg.cleanup()
+	dir := filepath.Join(tg.pwd(), "testdata")
+	tg.setenv("GOPATH", dir)
+	tg.run("run", filepath.Join(dir, "src/run/good.go"))
+	tg.runFail("run", filepath.Join(dir, "src/run/bad.go"))
+	tg.grepStderr("use of internal package not allowed", "unexpected error for run/bad.go")
+}
+
 func testMove(t *testing.T, vcs, url, base, config string) {
 	testenv.MustHaveExternalNetwork(t)
@@ -348,11 +348,9 @@ func loadImport(path, srcDir string, parent *Package, stk *importStack, importPo
 	// TODO: After Go 1, decide when to pass build.AllowBinary here.
 	// See issue 3268 for mistakes to avoid.
 	buildMode := build.ImportComment
-	if go15VendorExperiment && mode&useVendor != 0 && path == origPath {
-		// We've already searched the vendor directories and didn't find anything.
-		// Let Import search them again so that, if the package is not found anywhere,
-		// the error includes the vendor directories in the list of places considered.
-		buildMode |= build.AllowVendor
+	if !go15VendorExperiment || mode&useVendor == 0 || path != origPath {
+		// Not vendoring, or we already found the vendored path.
+		buildMode |= build.IgnoreVendor
 	}
 	bp, err := buildContext.Import(path, srcDir, buildMode)
 	bp.ImportPath = importPath

@@ -422,7 +420,7 @@ func vendoredImportPath(parent *Package, path string) (found string) {
 			continue
 		}
 		targ := filepath.Join(dir[:i], vpath)
-		if isDir(targ) {
+		if isDir(targ) && hasGoFiles(targ) {
 			// We started with parent's dir c:\gopath\src\foo\bar\baz\quux\xyzzy.
 			// We know the import path for parent's dir.
 			// We chopped off some number of path elements and

@@ -445,6 +443,20 @@ func vendoredImportPath(parent *Package, path string) (found string) {
 	return path
 }
 
+// hasGoFiles reports whether dir contains any files with names ending in .go.
+// For a vendor check we must exclude directories that contain no .go files.
+// Otherwise it is not possible to vendor just a/b/c and still import the
+// non-vendored a/b. See golang.org/issue/13832.
+func hasGoFiles(dir string) bool {
+	fis, _ := ioutil.ReadDir(dir)
+	for _, fi := range fis {
+		if !fi.IsDir() && strings.HasSuffix(fi.Name(), ".go") {
+			return true
+		}
+	}
+	return false
+}
+
 // reusePackage reuses package p to satisfy the import at the top
 // of the import stack stk. If this use causes an import loop,
 // reusePackage updates p's error information to record the loop.
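The hasGoFiles check above exists because a vendor tree can contain a/b/c without containing an a/b package, as golang.org/issue/13832 describes: the mere existence of the intermediate directory must not shadow the real, non-vendored a/b. A self-contained sketch of the check and the layout it guards against follows; the paths are hypothetical and the snippet is illustrative, not part of the commit.

// With this GOPATH layout:
//
//   src/foo/vendor/a/b/        (directory exists, but holds no .go files)
//   src/foo/vendor/a/b/c/c.go  (only a/b/c is actually vendored)
//   src/a/b/b.go               (the real, non-vendored a/b)
//
// an `import "a/b"` inside foo must not resolve to foo/vendor/a/b just
// because that directory exists; hasGoFiles is what rejects it.
package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func hasGoFiles(dir string) bool {
	fis, _ := ioutil.ReadDir(dir)
	for _, fi := range fis {
		if !fi.IsDir() && strings.HasSuffix(fi.Name(), ".go") {
			return true
		}
	}
	return false
}

func main() {
	for _, dir := range []string{"src/foo/vendor/a/b", "src/a/b"} {
		fmt.Printf("%-24s eligible as vendored package: %v\n", dir, hasGoFiles(dir))
	}
}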
@@ -504,7 +516,7 @@ func disallowInternal(srcDir string, p *Package, stk *importStack) *Package {
 		i-- // rewind over slash in ".../internal"
 	}
 	parent := p.Dir[:i+len(p.Dir)-len(p.ImportPath)]
-	if hasPathPrefix(filepath.ToSlash(srcDir), filepath.ToSlash(parent)) {
+	if hasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
 		return p
 	}

@@ -601,7 +613,7 @@ func disallowVendorVisibility(srcDir string, p *Package, stk *importStack) *Pack
 		return p
 	}
 	parent := p.Dir[:truncateTo]
-	if hasPathPrefix(filepath.ToSlash(srcDir), filepath.ToSlash(parent)) {
+	if hasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) {
 		return p
 	}
src/cmd/go/testdata/src/run/bad.go (new file)
@@ -0,0 +1,5 @@
+package main
+
+import _ "run/subdir/internal/private"
+
+func main() {}

src/cmd/go/testdata/src/run/good.go (new file)
@@ -0,0 +1,5 @@
+package main
+
+import _ "run/internal"
+
+func main() {}

src/cmd/go/testdata/src/run/internal/internal.go (new file)
@@ -0,0 +1 @@
+package internal

src/cmd/go/testdata/src/run/subdir/internal/private/private.go (new file)
@@ -0,0 +1 @@
+package private

src/cmd/go/testdata/src/vend/dir1/dir1.go (new file)
@@ -0,0 +1 @@
+package dir1

src/cmd/go/testdata/src/vend/vendor/vend/dir1/dir2/dir2.go (new file)
@@ -0,0 +1 @@
+package dir2

src/cmd/go/testdata/src/vend/x/x.go
@@ -3,3 +3,5 @@ package x
 import _ "p"
 import _ "q"
 import _ "r"
+import _ "vend/dir1"      // not vendored
+import _ "vend/dir1/dir2" // vendored
@@ -24,12 +24,14 @@ func TestVendorImports(t *testing.T) {
 	tg.run("list", "-f", "{{.ImportPath}} {{.Imports}}", "vend/...")
 	want := `
 		vend [vend/vendor/p r]
+		vend/dir1 []
 		vend/hello [fmt vend/vendor/strings]
 		vend/subdir [vend/vendor/p r]
 		vend/vendor/p []
 		vend/vendor/q []
 		vend/vendor/strings []
-		vend/x [vend/x/vendor/p vend/vendor/q vend/x/vendor/r]
+		vend/vendor/vend/dir1/dir2 []
+		vend/x [vend/x/vendor/p vend/vendor/q vend/x/vendor/r vend/dir1 vend/vendor/vend/dir1/dir2]
 		vend/x/invalid [vend/x/invalid/vendor/foo]
 		vend/x/vendor/p []
 		vend/x/vendor/p/p [notfound]

@@ -45,6 +47,14 @@ func TestVendorImports(t *testing.T) {
 	}
 }
 
+func TestVendorBuild(t *testing.T) {
+	tg := testgo(t)
+	defer tg.cleanup()
+	tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
+	tg.setenv("GO15VENDOREXPERIMENT", "1")
+	tg.run("build", "vend/x")
+}
+
 func TestVendorRun(t *testing.T) {
 	tg := testgo(t)
 	defer tg.cleanup()
@@ -282,6 +282,7 @@ const (
 	AUSEFIELD
 	AVARDEF
 	AVARKILL
+	AVARLIVE
 	A_ARCHSPECIFIC
 )

@@ -609,6 +610,12 @@ type Link struct {
 	Version int
 	Textp   *LSym
 	Etextp  *LSym
+
+	// state for writing objects
+	Text  *LSym
+	Data  *LSym
+	Etext *LSym
+	Edata *LSym
 }
 
 // The smallest possible offset from the hardware stack pointer to a local

@@ -111,6 +111,11 @@ import (
 // out a Go object file. The linker does not call this; the linker
 // does not write out object files.
 func Writeobjdirect(ctxt *Link, b *Biobuf) {
+	Flushplist(ctxt)
+	Writeobjfile(ctxt, b)
+}
+
+func Flushplist(ctxt *Link) {
 	var flag int
 	var s *LSym
 	var p *Prog

@@ -119,13 +124,11 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 
 	// Build list of symbols, and assign instructions to lists.
 	// Ignore ctxt->plist boundaries. There are no guarantees there,
-	// and the C compilers and assemblers just use one big list.
-	var text *LSym
-
+	// and the assemblers just use one big list.
 	var curtext *LSym
-	var data *LSym
+	var text *LSym
 	var etext *LSym
-	var edata *LSym
 	for pl := ctxt.Plist; pl != nil; pl = pl.Link {
 		for p = pl.Firstpc; p != nil; p = plink {
 			if ctxt.Debugasm != 0 && ctxt.Debugvlog != 0 {

@@ -174,10 +177,10 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 				log.Fatalf("symbol %s listed multiple times", s.Name)
 			}
 			s.Onlist = 1
-			if data == nil {
-				data = s
+			if ctxt.Data == nil {
+				ctxt.Data = s
 			} else {
-				edata.Next = s
+				ctxt.Edata.Next = s
 			}
 			s.Next = nil
 			s.Size = p.To.Offset

@@ -195,7 +198,7 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 			} else if flag&TLSBSS != 0 {
 				s.Type = STLSBSS
 			}
-			edata = s
+			ctxt.Edata = s
 			continue
 		}
 

@@ -298,6 +301,17 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 		linkpcln(ctxt, s)
 	}
 
+	// Add to running list in ctxt.
+	if ctxt.Etext == nil {
+		ctxt.Text = text
+	} else {
+		ctxt.Etext.Next = text
+	}
+	ctxt.Etext = etext
+	ctxt.Plist = nil
+}
+
+func Writeobjfile(ctxt *Link, b *Biobuf) {
 	// Emit header.
 	Bputc(b, 0)

@@ -312,10 +326,10 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
 	wrstring(b, "")
 
 	// Emit symbols.
-	for s := text; s != nil; s = s.Next {
+	for s := ctxt.Text; s != nil; s = s.Next {
 		writesym(ctxt, b, s)
 	}
-	for s := data; s != nil; s = s.Next {
+	for s := ctxt.Data; s != nil; s = s.Next {
 		writesym(ctxt, b, s)
 	}

@@ -611,7 +611,7 @@ func RegisterOpcode(lo int, Anames []string) {
 }
 
 func Aconv(a int) string {
-	if a < A_ARCHSPECIFIC {
+	if 0 <= a && a < len(Anames) {
 		return Anames[a]
 	}
 	for i := range aSpace {

@@ -643,6 +643,7 @@ var Anames = []string{
 	"USEFIELD",
 	"VARDEF",
 	"VARKILL",
+	"VARLIVE",
 }
 
 func Bool2int(b bool) int {

@@ -181,6 +181,7 @@ const (
 	APAUSE
 	APOPAL
 	APOPAW
+	APOPCNT
 	APOPFL
 	APOPFW
 	APOPL

@@ -500,6 +501,7 @@ const (
 	AXADDQ
 	AXCHGQ
 	AXORQ
+	AXGETBV
 
 	// media
 	AADDPD

@@ -614,6 +616,9 @@ const (
 	APCMPGTL
 	APCMPGTW
 	APEXTRW
+	APEXTRB
+	APEXTRD
+	APEXTRQ
 	APFACC
 	APFADD
 	APFCMPEQ

@@ -632,6 +637,7 @@ const (
 	APFSUB
 	APFSUBR
 	APINSRW
+	APINSRB
 	APINSRD
 	APINSRQ
 	APMADDWL

@@ -149,6 +149,7 @@ var Anames = []string{
 	"PAUSE",
 	"POPAL",
 	"POPAW",
+	"POPCNT",
 	"POPFL",
 	"POPFW",
 	"POPL",

@@ -451,6 +452,7 @@ var Anames = []string{
 	"XADDQ",
 	"XCHGQ",
 	"XORQ",
+	"XGETBV",
 	"ADDPD",
 	"ADDPS",
 	"ADDSD",

@@ -563,6 +565,9 @@ var Anames = []string{
 	"PCMPGTL",
 	"PCMPGTW",
 	"PEXTRW",
+	"PEXTRB",
+	"PEXTRD",
+	"PEXTRQ",
 	"PFACC",
 	"PFADD",
 	"PFCMPEQ",

@@ -581,6 +586,7 @@ var Anames = []string{
 	"PFSUB",
 	"PFSUBR",
 	"PINSRW",
+	"PINSRB",
 	"PINSRD",
 	"PINSRQ",
 	"PMADDWL",

@@ -187,6 +187,7 @@ const (
 	Zm_r_xm_nr
 	Zr_m_xm_nr
 	Zibm_r /* mmx1,mmx2/mem64,imm8 */
+	Zibr_m
 	Zmb_r
 	Zaut_r
 	Zo_m

@@ -219,6 +220,7 @@ const (
 	Pf2   = 0xf2 /* xmm escape 1: f2 0f */
 	Pf3   = 0xf3 /* xmm escape 2: f3 0f */
 	Pq3   = 0x67 /* xmm escape 3: 66 48 0f */
+	Pfw   = 0xf4 /* Pf3 with Rex.w: f3 48 0f */
 	Pvex1 = 0xc5 /* 66.0f escape, vex encoding */
 	Pvex2 = 0xc6 /* f3.0f escape, vex encoding */
 	Pvex3 = 0xc7 /* 66.0f38 escape, vex encoding */

@@ -720,6 +722,10 @@ var yextrw = []ytab{
 	{Yu8, Yxr, Yrl, Zibm_r, 2},
 }
 
+var yextr = []ytab{
+	{Yu8, Yxr, Ymm, Zibr_m, 3},
+}
+
 var yinsrw = []ytab{
 	{Yu8, Yml, Yxr, Zibm_r, 2},
 }

@@ -1162,6 +1168,9 @@ var optab =
 	{APCMPGTL, ymm, Py1, [23]uint8{0x66, Pe, 0x66}},
 	{APCMPGTW, ymm, Py1, [23]uint8{0x65, Pe, 0x65}},
 	{APEXTRW, yextrw, Pq, [23]uint8{0xc5, 00}},
+	{APEXTRB, yextr, Pq, [23]uint8{0x3a, 0x14, 00}},
+	{APEXTRD, yextr, Pq, [23]uint8{0x3a, 0x16, 00}},
+	{APEXTRQ, yextr, Pq3, [23]uint8{0x3a, 0x16, 00}},
 	{APF2IL, ymfp, Px, [23]uint8{0x1d}},
 	{APF2IW, ymfp, Px, [23]uint8{0x1c}},
 	{API2FL, ymfp, Px, [23]uint8{0x0d}},

@@ -1183,6 +1192,7 @@ var optab =
 	{APFSUB, ymfp, Px, [23]uint8{0x9a}},
 	{APFSUBR, ymfp, Px, [23]uint8{0xaa}},
 	{APINSRW, yinsrw, Pq, [23]uint8{0xc4, 00}},
+	{APINSRB, yinsr, Pq, [23]uint8{0x3a, 0x20, 00}},
 	{APINSRD, yinsr, Pq, [23]uint8{0x3a, 0x22, 00}},
 	{APINSRQ, yinsr, Pq3, [23]uint8{0x3a, 0x22, 00}},
 	{APMADDWL, ymm, Py1, [23]uint8{0xf5, Pe, 0xf5}},

@@ -1198,6 +1208,7 @@ var optab =
 	{APMULULQ, ymm, Py1, [23]uint8{0xf4, Pe, 0xf4}},
 	{APOPAL, ynone, P32, [23]uint8{0x61}},
 	{APOPAW, ynone, Pe, [23]uint8{0x61}},
+	{APOPCNT, yml_rl, Pfw, [23]uint8{0xb8}},
 	{APOPFL, ynone, P32, [23]uint8{0x9d}},
 	{APOPFQ, ynone, Py, [23]uint8{0x9d}},
 	{APOPFW, ynone, Pe, [23]uint8{0x9d}},

@@ -1533,6 +1544,7 @@ var optab =
 	{AXABORT, yxabort, Px, [23]uint8{0xc6, 0xf8}},
 	{AXEND, ynone, Px, [23]uint8{0x0f, 01, 0xd5}},
 	{AXTEST, ynone, Px, [23]uint8{0x0f, 01, 0xd6}},
+	{AXGETBV, ynone, Pm, [23]uint8{01, 0xd0}},
 	{obj.AUSEFIELD, ynop, Px, [23]uint8{0, 0}},
 	{obj.ATYPE, nil, 0, [23]uint8{}},
 	{obj.AFUNCDATA, yfuncdata, Px, [23]uint8{0, 0}},
@@ -3194,6 +3206,15 @@ func doasm(ctxt *obj.Link, p *obj.Prog) {
 		ctxt.Andptr[0] = Pm
 		ctxt.Andptr = ctxt.Andptr[1:]
 
+	case Pfw: /* first escape, Rex.w, and second escape */
+		ctxt.Andptr[0] = Pf3
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = Pw
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
 	case Pm: /* opcode escape */
 		ctxt.Andptr[0] = Pm
 		ctxt.Andptr = ctxt.Andptr[1:]

@@ -3343,7 +3364,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) {
 			ctxt.Andptr[0] = byte(op)
 			ctxt.Andptr = ctxt.Andptr[1:]
 
-		case Zibm_r:
+		case Zibm_r, Zibr_m:
 			for {
 				tmp1 := z
 				z++

@@ -3354,7 +3375,11 @@ func doasm(ctxt *obj.Link, p *obj.Prog) {
 				ctxt.Andptr[0] = byte(op)
 				ctxt.Andptr = ctxt.Andptr[1:]
 			}
-			asmand(ctxt, p, p.From3, &p.To)
+			if yt.zcase == Zibr_m {
+				asmand(ctxt, p, &p.To, p.From3)
+			} else {
+				asmand(ctxt, p, p.From3, &p.To)
+			}
 			ctxt.Andptr[0] = byte(p.From.Offset)
 			ctxt.Andptr = ctxt.Andptr[1:]

@@ -1068,7 +1068,7 @@ func hostlink() {
 		argv = append(argv, "-pie")
 	case BuildmodeCShared:
 		if HEADTYPE == obj.Hdarwin {
-			argv = append(argv, "-dynamiclib")
+			argv = append(argv, "-dynamiclib", "-Wl,-read_only_relocs,suppress")
 		} else {
 			// ELF.
 			argv = append(argv, "-Wl,-Bsymbolic")

@@ -566,6 +566,25 @@ func Asmbmacho() {
 		}
 	}
 
+	if Linkmode == LinkInternal {
+		// For lldb, must say LC_VERSION_MIN_MACOSX or else
+		// it won't know that this Mach-O binary is from OS X
+		// (could be iOS or WatchOS intead).
+		// Go on iOS uses linkmode=external, and linkmode=external
+		// adds this itself. So we only need this code for linkmode=internal
+		// and we can assume OS X.
+		//
+		// See golang.org/issues/12941.
+		const (
+			LC_VERSION_MIN_MACOSX   = 0x24
+			LC_VERSION_MIN_IPHONEOS = 0x25
+			LC_VERSION_MIN_WATCHOS  = 0x30
+		)
+		ml := newMachoLoad(LC_VERSION_MIN_MACOSX, 2)
+		ml.data[0] = 10<<16 | 7<<8 | 0<<0 // OS X version 10.7.0
+		ml.data[1] = 10<<16 | 7<<8 | 0<<0 // SDK 10.7.0
+	}
+
 	// TODO: dwarf headers go in ms too
 	if Debug['s'] == 0 {
 		dwarfaddmachoheaders(ms)

@@ -1217,12 +1217,17 @@ func Asmbpe() {
 	// larger size, as verified with VMMap.
 
 	// Go code would be OK with 64k stacks, but we need larger stacks for cgo.
-	// That default stack reserve size affects only the main thread,
-	// for other threads we specify stack size in runtime explicitly
+	//
+	// The default stack reserve size affects only the main
+	// thread, ctrlhandler thread, and profileloop thread. For
+	// these, it must be greater than the stack size assumed by
+	// externalthreadhandler.
+	//
+	// For other threads we specify stack size in runtime explicitly
 	// (runtime knows whether cgo is enabled or not).
-	// If you change stack reserve sizes here,
-	// change STACKSIZE in runtime/cgo/gcc_windows_{386,amd64}.c and correspondent
-	// CreateThread parameter in runtime.newosproc as well.
+	// For these, the reserve must match STACKSIZE in
+	// runtime/cgo/gcc_windows_{386,amd64}.c and the correspondent
+	// CreateThread parameter in runtime.newosproc.
 	if !iscgo {
 		oh64.SizeOfStackReserve = 0x00020000
 		oh.SizeOfStackReserve = 0x00020000

@@ -217,8 +217,6 @@ Lexp_dec_loop:
 	MOVUPS X0, 16(DX)
 	RET
 
-#define PSHUFD_X0_X0_ BYTE $0x66; BYTE $0x0f; BYTE $0x70; BYTE $0xc0
-#define PSHUFD_X1_X1_ BYTE $0x66; BYTE $0x0f; BYTE $0x70; BYTE $0xc9
 TEXT _expand_key_128<>(SB),NOSPLIT,$0
 	PSHUFD $0xff, X1, X1
 	SHUFPS $0x10, X0, X4

@@ -230,8 +228,6 @@ TEXT _expand_key_128<>(SB),NOSPLIT,$0
 	ADDQ $16, BX
 	RET
 
-#define PSLLDQ_X5_ BYTE $0x66; BYTE $0x0f; BYTE $0x73; BYTE $0xfd
-#define PSHUFD_X0_X3_ BYTE $0x66; BYTE $0x0f; BYTE $0x70; BYTE $0xd8
 TEXT _expand_key_192a<>(SB),NOSPLIT,$0
 	PSHUFD $0x55, X1, X1
 	SHUFPS $0x10, X0, X4

@@ -242,7 +238,7 @@ TEXT _expand_key_192a<>(SB),NOSPLIT,$0
 
 	MOVAPS X2, X5
 	MOVAPS X2, X6
-	PSLLDQ_X5_; BYTE $0x4
+	PSLLDQ $0x4, X5
 	PSHUFD $0xff, X0, X3
 	PXOR X3, X2
 	PXOR X5, X2

@@ -264,7 +260,7 @@ TEXT _expand_key_192b<>(SB),NOSPLIT,$0
 	PXOR X1, X0
 
 	MOVAPS X2, X5
-	PSLLDQ_X5_; BYTE $0x4
+	PSLLDQ $0x4, X5
 	PSHUFD $0xff, X0, X3
 	PXOR X3, X2
 	PXOR X5, X2

@@ -345,7 +345,7 @@ TEXT ·gcmAesData(SB),NOSPLIT,$0
 	PXOR B0, B0
 	MOVQ (aut), B0
 	PINSRD $2, 8(aut), B0
-	BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x20; BYTE $0x46; BYTE $0x0c; BYTE $0x0c //PINSRB $12, 12(aut), B0
+	PINSRB $12, 12(aut), B0
 	XORQ autLen, autLen
 	JMP dataMul
 

@@ -404,7 +404,7 @@ dataEnd:
 dataLoadLoop:
 
 	PSLLDQ $1, B0
-	BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x20; BYTE $0x06; BYTE $0x00 //PINSRB $0, (aut), B0
+	PINSRB $0, (aut), B0
 
 	LEAQ -1(aut), aut
 	DECQ autLen

@@ -892,7 +892,7 @@ encLast4:
 	PXOR B0, B0
 ptxLoadLoop:
 	PSLLDQ $1, B0
-	BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x20; BYTE $0x06; BYTE $0x00 //PINSRB $0, (ptx), B0
+	PINSRB $0, (ptx), B0
 	LEAQ -1(ptx), ptx
 	DECQ ptxLen
 	JNE ptxLoadLoop

@@ -1264,7 +1264,7 @@ decLast3:
 	PXOR T1, B0
 
 ptxStoreLoop:
-	BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x14; BYTE $0x06; BYTE $0x00 // PEXTRB $0, B0, (ptx)
+	PEXTRB $0, B0, (ptx)
 	PSRLDQ $1, B0
 	LEAQ 1(ptx), ptx
 	DECQ ptxLen

@@ -38,6 +38,9 @@ type AEAD interface {
 	//
 	// The ciphertext and dst may alias exactly or not at all. To reuse
 	// ciphertext's storage for the decrypted output, use ciphertext[:0] as dst.
+	//
+	// Even if the function fails, the contents of dst, up to its capacity,
+	// may be overwritten.
 	Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error)
 }
 

@@ -168,11 +171,19 @@ func (g *gcm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
 	var expectedTag [gcmTagSize]byte
 	g.auth(expectedTag[:], ciphertext, data, &tagMask)
 
+	ret, out := sliceForAppend(dst, len(ciphertext))
+
 	if subtle.ConstantTimeCompare(expectedTag[:], tag) != 1 {
+		// The AESNI code decrypts and authenticates concurrently, and
+		// so overwrites dst in the event of a tag mismatch. That
+		// behaviour is mimicked here in order to be consistent across
+		// platforms.
+		for i := range out {
+			out[i] = 0
+		}
 		return nil, errOpen
 	}
 
-	ret, out := sliceForAppend(dst, len(ciphertext))
 	g.counterCrypt(out, ciphertext, &counter)
 
 	return ret, nil
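The gcm.go change above makes a failed Open zero whatever it wrote into dst's storage, matching the AESNI assembly path. A short usage sketch, not part of the commit, of what a caller should expect on a tag mismatch:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	key := make([]byte, 16)
	block, _ := aes.NewCipher(key)
	aead, _ := cipher.NewGCM(block)

	nonce := make([]byte, aead.NonceSize())
	ciphertext := aead.Seal(nil, nonce, []byte("hello"), nil)
	ciphertext[0] ^= 0x80 // corrupt the message so authentication fails

	buf := make([]byte, 0, len(ciphertext))
	plaintext, err := aead.Open(buf, nonce, ciphertext, nil)

	// plaintext is nil and err is non-nil; the bytes written into buf's
	// backing array have been zeroed and must not be used.
	fmt.Println(plaintext, err)
}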
@ -240,3 +240,37 @@ func TestAESGCM(t *testing.T) {
|
||||||
ct[0] ^= 0x80
|
ct[0] ^= 0x80
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTagFailureOverwrite(t *testing.T) {
|
||||||
|
// The AESNI GCM code decrypts and authenticates concurrently and so
|
||||||
|
// overwrites the output buffer before checking the authentication tag.
|
||||||
|
// In order to be consistent across platforms, all implementations
|
||||||
|
// should do this and this test checks that.
|
||||||
|
|
||||||
|
key, _ := hex.DecodeString("ab72c77b97cb5fe9a382d9fe81ffdbed")
|
||||||
|
nonce, _ := hex.DecodeString("54cc7dc2c37ec006bcc6d1db")
|
||||||
|
ciphertext, _ := hex.DecodeString("0e1bde206a07a9c2c1b65300f8c649972b4401346697138c7a4891ee59867d0c")
|
||||||
|
|
||||||
|
aes, _ := aes.NewCipher(key)
|
||||||
|
aesgcm, _ := cipher.NewGCM(aes)
|
||||||
|
|
||||||
|
dst := make([]byte, len(ciphertext)-16)
|
||||||
|
for i := range dst {
|
||||||
|
dst[i] = 42
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := aesgcm.Open(dst[:0], nonce, ciphertext, nil)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("Bad Open still resulted in nil error.")
|
||||||
|
}
|
||||||
|
|
||||||
|
if result != nil {
|
||||||
|
t.Fatal("Failed Open returned non-nil result.")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range dst {
|
||||||
|
if dst[i] != 0 {
|
||||||
|
t.Fatal("Failed Open didn't zero dst buffer")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@@ -16,6 +16,7 @@ import (
 	"io"
 	"net"
 	"sync"
+	"sync/atomic"
 	"time"
 )

@@ -56,6 +57,11 @@ type Conn struct {
 	input    *block       // application data waiting to be read
 	hand     bytes.Buffer // handshake data waiting to be read

+	// activeCall is an atomic int32; the low bit is whether Close has
+	// been called. the rest of the bits are the number of goroutines
+	// in Conn.Write.
+	activeCall int32
+
 	tmp [16]byte
 }

@@ -855,8 +861,22 @@ func (c *Conn) readHandshake() (interface{}, error) {
 	return m, nil
 }

+var errClosed = errors.New("crypto/tls: use of closed connection")
+
 // Write writes data to the connection.
 func (c *Conn) Write(b []byte) (int, error) {
+	// interlock with Close below
+	for {
+		x := atomic.LoadInt32(&c.activeCall)
+		if x&1 != 0 {
+			return 0, errClosed
+		}
+		if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) {
+			defer atomic.AddInt32(&c.activeCall, -2)
+			break
+		}
+	}
+
 	if err := c.Handshake(); err != nil {
 		return 0, err
 	}

@@ -960,6 +980,27 @@ func (c *Conn) Read(b []byte) (n int, err error) {

 // Close closes the connection.
 func (c *Conn) Close() error {
+	// Interlock with Conn.Write above.
+	var x int32
+	for {
+		x = atomic.LoadInt32(&c.activeCall)
+		if x&1 != 0 {
+			return errClosed
+		}
+		if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) {
+			break
+		}
+	}
+	if x != 0 {
+		// io.Writer and io.Closer should not be used concurrently.
+		// If Close is called while a Write is currently in-flight,
+		// interpret that as a sign that this Close is really just
+		// being used to break the Write and/or clean up resources and
+		// avoid sending the alertCloseNotify, which may block
+		// waiting on handshakeMutex or the c.out mutex.
+		return c.conn.Close()
+	}
+
 	var alertErr error

 	c.handshakeMutex.Lock()
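Aside (illustration only, not from this commit): the activeCall interlock above packs two facts into one int32, the low bit recording whether Close has been called and the remaining bits counting in-flight Writes. A self-contained sketch of the same encoding, with hypothetical names:

package main

import (
    "errors"
    "fmt"
    "sync/atomic"
)

var errClosed = errors.New("use of closed connection")

// gate mimics the activeCall encoding: low bit = closed, other bits = active writers.
type gate struct {
    state int32
}

// enter registers a writer, failing if the gate is already closed.
func (g *gate) enter() error {
    for {
        x := atomic.LoadInt32(&g.state)
        if x&1 != 0 {
            return errClosed
        }
        if atomic.CompareAndSwapInt32(&g.state, x, x+2) {
            return nil
        }
    }
}

// leave unregisters a writer.
func (g *gate) leave() { atomic.AddInt32(&g.state, -2) }

// close sets the closed bit and reports whether any writer was active.
func (g *gate) close() (hadWriters bool, err error) {
    for {
        x := atomic.LoadInt32(&g.state)
        if x&1 != 0 {
            return false, errClosed
        }
        if atomic.CompareAndSwapInt32(&g.state, x, x|1) {
            return x != 0, nil
        }
    }
}

func main() {
    var g gate
    g.enter()
    hadWriters, _ := g.close()
    fmt.Println(hadWriters) // true: a "write" was in flight when close ran
    g.leave()
    fmt.Println(g.enter()) // errClosed: no new writes after close
}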
@@ -6,6 +6,7 @@ package tls

 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"internal/testenv"
 	"io"

@@ -364,3 +365,104 @@ func TestVerifyHostnameResumed(t *testing.T) {
 		c.Close()
 	}
 }
+
+func TestConnCloseBreakingWrite(t *testing.T) {
+	ln := newLocalListener(t)
+	defer ln.Close()
+
+	srvCh := make(chan *Conn, 1)
+	var serr error
+	var sconn net.Conn
+	go func() {
+		var err error
+		sconn, err = ln.Accept()
+		if err != nil {
+			serr = err
+			srvCh <- nil
+			return
+		}
+		serverConfig := *testConfig
+		srv := Server(sconn, &serverConfig)
+		if err := srv.Handshake(); err != nil {
+			serr = fmt.Errorf("handshake: %v", err)
+			srvCh <- nil
+			return
+		}
+		srvCh <- srv
+	}()
+
+	cconn, err := net.Dial("tcp", ln.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer cconn.Close()
+
+	conn := &changeImplConn{
+		Conn: cconn,
+	}
+
+	clientConfig := *testConfig
+	tconn := Client(conn, &clientConfig)
+	if err := tconn.Handshake(); err != nil {
+		t.Fatal(err)
+	}
+
+	srv := <-srvCh
+	if srv == nil {
+		t.Fatal(serr)
+	}
+	defer sconn.Close()
+
+	connClosed := make(chan struct{})
+	conn.closeFunc = func() error {
+		close(connClosed)
+		return nil
+	}
+
+	inWrite := make(chan bool, 1)
+	var errConnClosed = errors.New("conn closed for test")
+	conn.writeFunc = func(p []byte) (n int, err error) {
+		inWrite <- true
+		<-connClosed
+		return 0, errConnClosed
+	}
+
+	closeReturned := make(chan bool, 1)
+	go func() {
+		<-inWrite
+		tconn.Close() // test that this doesn't block forever.
+		closeReturned <- true
+	}()
+
+	_, err = tconn.Write([]byte("foo"))
+	if err != errConnClosed {
+		t.Errorf("Write error = %v; want errConnClosed", err)
+	}
+
+	<-closeReturned
+	if err := tconn.Close(); err != errClosed {
+		t.Errorf("Close error = %v; want errClosed", err)
+	}
+}
+
+// changeImplConn is a net.Conn which can change its Write and Close
+// methods.
+type changeImplConn struct {
+	net.Conn
+	writeFunc func([]byte) (int, error)
+	closeFunc func() error
+}
+
+func (w *changeImplConn) Write(p []byte) (n int, err error) {
+	if w.writeFunc != nil {
+		return w.writeFunc(p)
+	}
+	return w.Conn.Write(p)
+}
+
+func (w *changeImplConn) Close() error {
+	if w.closeFunc != nil {
+		return w.closeFunc()
+	}
+	return w.Conn.Close()
+}
@@ -200,10 +200,15 @@ func IsScanValue(v interface{}) bool {
 // ValueConverter that's used when a Stmt doesn't implement
 // ColumnConverter.
 //
-// DefaultParameterConverter returns the given value directly if
-// IsValue(value). Otherwise integer type are converted to
-// int64, floats to float64, and strings to []byte. Other types are
-// an error.
+// DefaultParameterConverter returns its argument directly if
+// IsValue(arg). Otherwise, if the argument implements Valuer, its
+// Value method is used to return a Value. As a fallback, the provided
+// argument's underlying type is used to convert it to a Value:
+// underlying integer types are converted to int64, floats to float64,
+// and strings to []byte. If the argument is a nil pointer,
+// ConvertValue returns a nil Value. If the argument is a non-nil
+// pointer, it is dereferenced and ConvertValue is called
+// recursively. Other types are an error.
 var DefaultParameterConverter defaultConverter

 type defaultConverter struct{}
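Aside (illustrative sketch only, not from this commit): a small example of the conversions the revised doc comment describes. The myID type is hypothetical; it only exists to show a value whose underlying type is an integer.

package main

import (
    "database/sql/driver"
    "fmt"
)

type myID int32 // hypothetical type with an integer underlying type

func main() {
    conv := driver.DefaultParameterConverter

    v1, _ := conv.ConvertValue(int64(7))   // already a driver.Value: returned as is
    v2, _ := conv.ConvertValue(myID(42))   // underlying integer type, widened to int64
    v3, _ := conv.ConvertValue(float32(3)) // underlying float, widened to float64

    fmt.Printf("%T %T %T\n", v1, v2, v3)
}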
@@ -33,6 +33,9 @@ var _ = log.Printf
 // INSERT|<tablename>|col=val,col2=val2,col3=?
 // SELECT|<tablename>|projectcol1,projectcol2|filtercol=?,filtercol2=?
 //
+// Any of these can be preceded by PANIC|<method>|, to cause the
+// named method on fakeStmt to panic.
+//
 // When opening a fakeDriver's database, it starts empty with no
 // tables. All tables and data are stored in memory only.
 type fakeDriver struct {

@@ -111,6 +114,7 @@ type fakeStmt struct {

 	cmd   string
 	table string
+	panic string

 	closed bool

@@ -499,9 +503,15 @@ func (c *fakeConn) Prepare(query string) (driver.Stmt, error) {
 	if len(parts) < 1 {
 		return nil, errf("empty query")
 	}
+	stmt := &fakeStmt{q: query, c: c}
+	if len(parts) >= 3 && parts[0] == "PANIC" {
+		stmt.panic = parts[1]
+		parts = parts[2:]
+	}
 	cmd := parts[0]
+	stmt.cmd = cmd
 	parts = parts[1:]
-	stmt := &fakeStmt{q: query, c: c, cmd: cmd}
 	c.incrStat(&c.stmtsMade)
 	switch cmd {
 	case "WIPE":

@@ -524,6 +534,9 @@ func (c *fakeConn) Prepare(query string) (driver.Stmt, error) {
 }

 func (s *fakeStmt) ColumnConverter(idx int) driver.ValueConverter {
+	if s.panic == "ColumnConverter" {
+		panic(s.panic)
+	}
 	if len(s.placeholderConverter) == 0 {
 		return driver.DefaultParameterConverter
 	}

@@ -531,6 +544,9 @@ func (s *fakeStmt) ColumnConverter(idx int) driver.ValueConverter {
 }

 func (s *fakeStmt) Close() error {
+	if s.panic == "Close" {
+		panic(s.panic)
+	}
 	if s.c == nil {
 		panic("nil conn in fakeStmt.Close")
 	}

@@ -550,6 +566,9 @@ var errClosed = errors.New("fakedb: statement has been closed")
 var hookExecBadConn func() bool

 func (s *fakeStmt) Exec(args []driver.Value) (driver.Result, error) {
+	if s.panic == "Exec" {
+		panic(s.panic)
+	}
 	if s.closed {
 		return nil, errClosed
 	}

@@ -634,6 +653,9 @@ func (s *fakeStmt) execInsert(args []driver.Value, doInsert bool) (driver.Result
 var hookQueryBadConn func() bool

 func (s *fakeStmt) Query(args []driver.Value) (driver.Rows, error) {
+	if s.panic == "Query" {
+		panic(s.panic)
+	}
 	if s.closed {
 		return nil, errClosed
 	}

@@ -716,6 +738,9 @@ rows:
 }

 func (s *fakeStmt) NumInput() int {
+	if s.panic == "NumInput" {
+		panic(s.panic)
+	}
 	return s.placeholders
 }
@@ -1477,10 +1477,14 @@ func (s *Stmt) Exec(args ...interface{}) (Result, error) {
 		return nil, driver.ErrBadConn
 	}

-func resultFromStatement(ds driverStmt, args ...interface{}) (Result, error) {
+func driverNumInput(ds driverStmt) int {
 	ds.Lock()
-	want := ds.si.NumInput()
-	ds.Unlock()
+	defer ds.Unlock() // in case NumInput panics
+	return ds.si.NumInput()
+}
+
+func resultFromStatement(ds driverStmt, args ...interface{}) (Result, error) {
+	want := driverNumInput(ds)

 	// -1 means the driver doesn't know how to count the number of
 	// placeholders, so we won't sanity check input here and instead let the

@@ -1495,8 +1499,8 @@ func resultFromStatement(ds driverStmt, args ...interface{}) (Result, error) {
 	}

 	ds.Lock()
+	defer ds.Unlock()
 	resi, err := ds.si.Exec(dargs)
-	ds.Unlock()
 	if err != nil {
 		return nil, err
 	}

@@ -1927,6 +1931,6 @@ func stack() string {
 // withLock runs while holding lk.
 func withLock(lk sync.Locker, fn func()) {
 	lk.Lock()
+	defer lk.Unlock() // in case fn panics
 	fn()
-	lk.Unlock()
 }
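Aside (illustration, not from this commit): the change above defers the Unlock so that a panicking driver call cannot leave the lock held. A tiny standalone sketch of why that matters; the panic text and the recover wrapper are made up for the example.

package main

import (
    "fmt"
    "sync"
)

// withLock runs fn while holding lk; the deferred Unlock still runs if fn
// panics, so a recovered panic does not leave the mutex locked forever.
func withLock(lk sync.Locker, fn func()) {
    lk.Lock()
    defer lk.Unlock() // in case fn panics
    fn()
}

func main() {
    var mu sync.Mutex

    func() {
        defer func() { recover() }() // swallow the panic, as a caller might
        withLock(&mu, func() { panic("driver bug") })
    }()

    // Because Unlock was deferred, this second call does not deadlock.
    withLock(&mu, func() { fmt.Println("still usable") })
}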
@@ -68,6 +68,46 @@ func newTestDB(t testing.TB, name string) *DB {
 	return db
 }

+func TestDriverPanic(t *testing.T) {
+	// Test that if driver panics, database/sql does not deadlock.
+	db, err := Open("test", fakeDBName)
+	if err != nil {
+		t.Fatalf("Open: %v", err)
+	}
+	expectPanic := func(name string, f func()) {
+		defer func() {
+			err := recover()
+			if err == nil {
+				t.Fatalf("%s did not panic", name)
+			}
+		}()
+		f()
+	}
+
+	expectPanic("Exec Exec", func() { db.Exec("PANIC|Exec|WIPE") })
+	exec(t, db, "WIPE") // check not deadlocked
+	expectPanic("Exec NumInput", func() { db.Exec("PANIC|NumInput|WIPE") })
+	exec(t, db, "WIPE") // check not deadlocked
+	expectPanic("Exec Close", func() { db.Exec("PANIC|Close|WIPE") })
+	exec(t, db, "WIPE") // check not deadlocked
+	exec(t, db, "PANIC|Query|WIPE") // should run successfully: Exec does not call Query
+	exec(t, db, "WIPE") // check not deadlocked
+
+	exec(t, db, "CREATE|people|name=string,age=int32,photo=blob,dead=bool,bdate=datetime")
+
+	expectPanic("Query Query", func() { db.Query("PANIC|Query|SELECT|people|age,name|") })
+	expectPanic("Query NumInput", func() { db.Query("PANIC|NumInput|SELECT|people|age,name|") })
+	expectPanic("Query Close", func() {
+		rows, err := db.Query("PANIC|Close|SELECT|people|age,name|")
+		if err != nil {
+			t.Fatal(err)
+		}
+		rows.Close()
+	})
+	db.Query("PANIC|Exec|SELECT|people|age,name|") // should run successfully: Query does not call Exec
+	exec(t, db, "WIPE") // check not deadlocked
+}
+
 func exec(t testing.TB, db *DB, query string, args ...interface{}) {
 	_, err := db.Exec(query, args...)
 	if err != nil {
src/debug/dwarf/testdata/cycle.c (new file)
@@ -0,0 +1,7 @@
+typedef struct aaa *AAA;
+typedef AAA BBB;
+struct aaa { BBB val; };
+
+AAA x(void) {
+	return (AAA)0;
+}

src/debug/dwarf/testdata/cycle.elf (new binary file, not shown)
@@ -275,12 +275,14 @@ type typeReader interface {

 // Type reads the type at off in the DWARF ``info'' section.
 func (d *Data) Type(off Offset) (Type, error) {
-	return d.readType("info", d.Reader(), off, d.typeCache)
+	return d.readType("info", d.Reader(), off, d.typeCache, nil)
 }

-// readType reads a type from r at off of name using and updating a
-// type cache.
-func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Offset]Type) (Type, error) {
+// readType reads a type from r at off of name. It adds types to the
+// type cache, appends new typedef types to typedefs, and computes the
+// sizes of types. Callers should pass nil for typedefs; this is used
+// for internal recursion.
+func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Offset]Type, typedefs *[]*TypedefType) (Type, error) {
 	if t, ok := typeCache[off]; ok {
 		return t, nil
 	}

@@ -294,9 +296,24 @@ func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Off
 		return nil, DecodeError{name, off, "no type at offset"}
 	}

+	// If this is the root of the recursion, prepare to resolve
+	// typedef sizes once the recursion is done. This must be done
+	// after the type graph is constructed because it may need to
+	// resolve cycles in a different order than readType
+	// encounters them.
+	if typedefs == nil {
+		var typedefList []*TypedefType
+		defer func() {
+			for _, t := range typedefList {
+				t.Common().ByteSize = t.Type.Size()
+			}
+		}()
+		typedefs = &typedefList
+	}
+
 	// Parse type from Entry.
 	// Must always set typeCache[off] before calling
-	// d.Type recursively, to handle circular types correctly.
+	// d.readType recursively, to handle circular types correctly.
 	var typ Type

 	nextDepth := 0

@@ -345,7 +362,7 @@ func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Off
 		var t Type
 		switch toff := tval.(type) {
 		case Offset:
-			if t, err = d.readType(name, r.clone(), toff, typeCache); err != nil {
+			if t, err = d.readType(name, r.clone(), toff, typeCache, typedefs); err != nil {
 				return nil
 			}
 		case uint64:

@@ -674,7 +691,10 @@ func (d *Data) readType(name string, r typeReader, off Offset, typeCache map[Off
 		b = -1
 		switch t := typ.(type) {
 		case *TypedefType:
-			b = t.Type.Size()
+			// Record that we need to resolve this
+			// type's size once the type graph is
+			// constructed.
+			*typedefs = append(*typedefs, t)
 		case *PtrType:
 			b = int64(addressSize)
 		}
@@ -120,3 +120,37 @@ func testTypedefs(t *testing.T, d *Data, kind string) {
 		}
 	}
 }
+
+func TestTypedefCycle(t *testing.T) {
+	// See issue #13039: reading a typedef cycle starting from a
+	// different place than the size needed to be computed from
+	// used to crash.
+	//
+	// cycle.elf built with GCC 4.8.4:
+	//    gcc -g -c -o cycle.elf cycle.c
+	d := elfData(t, "testdata/cycle.elf")
+	r := d.Reader()
+	offsets := []Offset{}
+	for {
+		e, err := r.Next()
+		if err != nil {
+			t.Fatal("r.Next:", err)
+		}
+		if e == nil {
+			break
+		}
+		switch e.Tag {
+		case TagBaseType, TagTypedef, TagPointerType, TagStructType:
+			offsets = append(offsets, e.Offset)
+		}
+	}
+
+	// Parse each type with a fresh type cache.
+	for _, offset := range offsets {
+		d := elfData(t, "testdata/cycle.elf")
+		_, err := d.Type(offset)
+		if err != nil {
+			t.Fatalf("d.Type(0x%x): %s", offset, err)
+		}
+	}
+}
@@ -101,7 +101,7 @@ func (d *Data) sigToType(sig uint64) (Type, error) {

 	b := makeBuf(d, tu, tu.name, tu.off, tu.data)
 	r := &typeUnitReader{d: d, tu: tu, b: b}
-	t, err := d.readType(tu.name, r, Offset(tu.toff), make(map[Offset]Type))
+	t, err := d.readType(tu.name, r, Offset(tu.toff), make(map[Offset]Type), nil)
 	if err != nil {
 		return nil, err
 	}
@@ -346,21 +346,18 @@ func (enc *Encoding) DecodeString(s string) ([]byte, error) {
 }

 type decoder struct {
 	err     error
+	readErr error // error from r.Read
 	enc     *Encoding
 	r       io.Reader
 	end     bool       // saw end of message
 	buf     [1024]byte // leftover input
 	nbuf    int
 	out     []byte // leftover decoded output
 	outbuf  [1024 / 4 * 3]byte
 }

 func (d *decoder) Read(p []byte) (n int, err error) {
-	if d.err != nil {
-		return 0, d.err
-	}
-
 	// Use leftover decoded output from last read.
 	if len(d.out) > 0 {
 		n = copy(p, d.out)

@@ -368,19 +365,46 @@ func (d *decoder) Read(p []byte) (n int, err error) {
 		return n, nil
 	}

+	if d.err != nil {
+		return 0, d.err
+	}
+
 	// This code assumes that d.r strips supported whitespace ('\r' and '\n').

-	// Read a chunk.
-	nn := len(p) / 3 * 4
-	if nn < 4 {
-		nn = 4
-	}
-	if nn > len(d.buf) {
-		nn = len(d.buf)
-	}
-	nn, d.err = io.ReadAtLeast(d.r, d.buf[d.nbuf:nn], 4-d.nbuf)
-	d.nbuf += nn
-	if d.err != nil || d.nbuf < 4 {
+	// Refill buffer.
+	for d.nbuf < 4 && d.readErr == nil {
+		nn := len(p) / 3 * 4
+		if nn < 4 {
+			nn = 4
+		}
+		if nn > len(d.buf) {
+			nn = len(d.buf)
+		}
+		nn, d.readErr = d.r.Read(d.buf[d.nbuf:nn])
+		d.nbuf += nn
+	}
+
+	if d.nbuf < 4 {
+		if d.enc.padChar == NoPadding && d.nbuf > 0 {
+			// Decode final fragment, without padding.
+			var nw int
+			nw, _, d.err = d.enc.decode(d.outbuf[:], d.buf[:d.nbuf])
+			d.nbuf = 0
+			d.end = true
+			d.out = d.outbuf[:nw]
+			n = copy(p, d.out)
+			d.out = d.out[n:]
+			if n > 0 || len(p) == 0 && len(d.out) > 0 {
+				return n, nil
+			}
+			if d.err != nil {
+				return 0, d.err
+			}
+		}
+		d.err = d.readErr
+		if d.err == io.EOF && d.nbuf > 0 {
+			d.err = io.ErrUnexpectedEOF
+		}
 		return 0, d.err
 	}

@@ -396,13 +420,7 @@ func (d *decoder) Read(p []byte) (n int, err error) {
 		n, d.end, d.err = d.enc.decode(p, d.buf[:nr])
 	}
 	d.nbuf -= nr
-	for i := 0; i < d.nbuf; i++ {
-		d.buf[i] = d.buf[i+nr]
-	}
-
-	if d.err == nil {
-		d.err = err
-	}
+	copy(d.buf[:d.nbuf], d.buf[nr:])
 	return n, d.err
 }
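Aside (usage sketch only, not part of this commit): base64.NewDecoder wraps any io.Reader and buffers input itself, so callers can feed it a stream that delivers data in arbitrarily small pieces. The oneByteReader type below is hypothetical and only simulates such short reads.

package main

import (
    "encoding/base64"
    "fmt"
    "io"
    "io/ioutil"
    "strings"
)

// oneByteReader returns at most one byte per Read, mimicking a slow stream.
type oneByteReader struct{ r io.Reader }

func (o oneByteReader) Read(p []byte) (int, error) {
    if len(p) > 1 {
        p = p[:1]
    }
    return o.r.Read(p)
}

func main() {
    enc := base64.StdEncoding.EncodeToString([]byte("hello, base64"))
    dec := base64.NewDecoder(base64.StdEncoding, oneByteReader{strings.NewReader(enc)})
    out, err := ioutil.ReadAll(dec)
    fmt.Println(string(out), err)
}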
@@ -406,3 +406,28 @@ func BenchmarkDecodeString(b *testing.B) {
 		StdEncoding.DecodeString(data)
 	}
 }
+
+func TestDecoderRaw(t *testing.T) {
+	source := "AAAAAA"
+	want := []byte{0, 0, 0, 0}
+
+	// Direct.
+	dec1, err := RawURLEncoding.DecodeString(source)
+	if err != nil || !bytes.Equal(dec1, want) {
+		t.Errorf("RawURLEncoding.DecodeString(%q) = %x, %v, want %x, nil", source, dec1, err, want)
+	}
+
+	// Through reader. Used to fail.
+	r := NewDecoder(RawURLEncoding, bytes.NewReader([]byte(source)))
+	dec2, err := ioutil.ReadAll(io.LimitReader(r, 100))
+	if err != nil || !bytes.Equal(dec2, want) {
+		t.Errorf("reading NewDecoder(RawURLEncoding, %q) = %x, %v, want %x, nil", source, dec2, err, want)
+	}
+
+	// Should work with padding.
+	r = NewDecoder(URLEncoding, bytes.NewReader([]byte(source+"==")))
+	dec3, err := ioutil.ReadAll(r)
+	if err != nil || !bytes.Equal(dec3, want) {
+		t.Errorf("reading NewDecoder(URLEncoding, %q) = %x, %v, want %x, nil", source+"==", dec3, err, want)
+	}
+}
@@ -37,6 +37,7 @@ import (
 // To unmarshal JSON into a struct, Unmarshal matches incoming object
 // keys to the keys used by Marshal (either the struct field name or its tag),
 // preferring an exact match but also accepting a case-insensitive match.
+// Unmarshal will only set exported fields of the struct.
 //
 // To unmarshal JSON into an interface value,
 // Unmarshal stores one of these in the interface value:
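Aside (small illustrative example, not from this commit): the sentence added above documents that Unmarshal only sets exported fields; unexported fields are silently left at their zero value. The config type is made up for the example.

package main

import (
    "encoding/json"
    "fmt"
)

type config struct {
    Name   string // exported: set by Unmarshal
    secret string // unexported: silently ignored
}

func main() {
    var c config
    _ = json.Unmarshal([]byte(`{"Name":"gopher","secret":"hunter2"}`), &c)
    fmt.Printf("%q %q\n", c.Name, c.secret) // "gopher" ""
}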
@@ -344,18 +344,20 @@ const (
 	// See golang.org/s/go14customimport for more information.
 	ImportComment

-	// If AllowVendor is set, Import searches vendor directories
+	// By default, Import searches vendor directories
 	// that apply in the given source directory before searching
 	// the GOROOT and GOPATH roots.
 	// If an Import finds and returns a package using a vendor
 	// directory, the resulting ImportPath is the complete path
 	// to the package, including the path elements leading up
 	// to and including "vendor".
-	// For example, if Import("y", "x/subdir", AllowVendor) finds
+	// For example, if Import("y", "x/subdir", 0) finds
 	// "x/vendor/y", the returned package's ImportPath is "x/vendor/y",
 	// not plain "y".
 	// See golang.org/s/go15vendor for more information.
-	AllowVendor
+	//
+	// Setting IgnoreVendor ignores vendor directories.
+	IgnoreVendor
 )

 // A Package describes the Go package found in a directory.

@@ -571,7 +573,7 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa
 	gopath := ctxt.gopath()

 	// Vendor directories get first chance to satisfy import.
-	if mode&AllowVendor != 0 && srcDir != "" {
+	if mode&IgnoreVendor == 0 && srcDir != "" {
 		searchVendor := func(root string, isGoroot bool) bool {
 			sub, ok := ctxt.hasSubdir(root, srcDir)
 			if !ok || !strings.HasPrefix(sub, "src/") || strings.Contains(sub, "/testdata/") {

@@ -581,7 +583,7 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa
 			vendor := ctxt.joinPath(root, sub, "vendor")
 			if ctxt.isDir(vendor) {
 				dir := ctxt.joinPath(vendor, path)
-				if ctxt.isDir(dir) {
+				if ctxt.isDir(dir) && hasGoFiles(ctxt, dir) {
 					p.Dir = dir
 					p.ImportPath = strings.TrimPrefix(pathpkg.Join(sub, "vendor", path), "src/")
 					p.Goroot = isGoroot

@@ -882,6 +884,20 @@ Found:
 	return p, pkgerr
 }

+// hasGoFiles reports whether dir contains any files with names ending in .go.
+// For a vendor check we must exclude directories that contain no .go files.
+// Otherwise it is not possible to vendor just a/b/c and still import the
+// non-vendored a/b. See golang.org/issue/13832.
+func hasGoFiles(ctxt *Context, dir string) bool {
+	ents, _ := ctxt.readDir(dir)
+	for _, ent := range ents {
+		if !ent.IsDir() && strings.HasSuffix(ent.Name(), ".go") {
+			return true
+		}
+	}
+	return false
+}
+
 func findImportComment(data []byte) (s string, line int) {
 	// expect keyword package
 	word, data := parseWord(data)
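Aside (usage sketch only, not part of this commit): after this change the zero ImportMode is vendor-aware and IgnoreVendor opts out, so existing callers that pass 0 pick up vendor directories automatically. What the calls below actually resolve to depends on the local GOROOT/GOPATH tree.

package main

import (
    "fmt"
    "go/build"
    "path/filepath"
)

func main() {
    srcDir := filepath.Join(build.Default.GOROOT, "src", "net", "http")

    // Default (mode 0): vendor directories that apply to srcDir are searched first.
    p, err := build.Import("golang.org/x/net/http2/hpack", srcDir, 0)
    fmt.Println(p.ImportPath, err)

    // IgnoreVendor: skip vendor directories entirely.
    p, err = build.Import("golang.org/x/net/http2/hpack", srcDir, build.IgnoreVendor)
    fmt.Println(p.ImportPath, err)
}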
@@ -303,7 +303,7 @@ func TestImportVendor(t *testing.T) {
 	testenv.MustHaveGoBuild(t) // really must just have source
 	ctxt := Default
 	ctxt.GOPATH = ""
-	p, err := ctxt.Import("golang.org/x/net/http2/hpack", filepath.Join(ctxt.GOROOT, "src/net/http"), AllowVendor)
+	p, err := ctxt.Import("golang.org/x/net/http2/hpack", filepath.Join(ctxt.GOROOT, "src/net/http"), 0)
 	if err != nil {
 		t.Fatalf("cannot find vendored golang.org/x/net/http2/hpack from net/http directory: %v", err)
 	}

@@ -317,7 +317,7 @@ func TestImportVendorFailure(t *testing.T) {
 	testenv.MustHaveGoBuild(t) // really must just have source
 	ctxt := Default
 	ctxt.GOPATH = ""
-	p, err := ctxt.Import("x.com/y/z", filepath.Join(ctxt.GOROOT, "src/net/http"), AllowVendor)
+	p, err := ctxt.Import("x.com/y/z", filepath.Join(ctxt.GOROOT, "src/net/http"), 0)
 	if err == nil {
 		t.Fatalf("found made-up package x.com/y/z in %s", p.Dir)
 	}

@@ -327,3 +327,21 @@ func TestImportVendorFailure(t *testing.T) {
 		t.Fatalf("error on failed import does not mention GOROOT/src/vendor directory:\n%s", e)
 	}
 }
+
+func TestImportVendorParentFailure(t *testing.T) {
+	testenv.MustHaveGoBuild(t) // really must just have source
+	ctxt := Default
+	ctxt.GOPATH = ""
+	// This import should fail because the vendor/golang.org/x/net/http2 directory has no source code.
+	p, err := ctxt.Import("golang.org/x/net/http2", filepath.Join(ctxt.GOROOT, "src/net/http"), 0)
+	if err == nil {
+		t.Fatalf("found empty parent in %s", p.Dir)
+	}
+	if p != nil && p.Dir != "" {
+		t.Fatalf("decided to use %s", p.Dir)
+	}
+	e := err.Error()
+	if !strings.Contains(e, " (vendor tree)") {
+		t.Fatalf("error on failed import does not mention GOROOT/src/vendor directory:\n%s", e)
+	}
+}
@@ -151,10 +151,11 @@ type reader struct {
 	notes map[string][]*Note

 	// declarations
 	imports   map[string]int
+	hasDotImp bool // if set, package contains a dot import
 	values    []*Value // consts and vars
 	types     map[string]*namedType
 	funcs     methodSet

 	// support for package-local error type declarations
 	errorDecl bool // if set, type "error" was declared locally

@@ -471,6 +472,9 @@ func (r *reader) readFile(src *ast.File) {
 			if s, ok := spec.(*ast.ImportSpec); ok {
 				if import_, err := strconv.Unquote(s.Path.Value); err == nil {
 					r.imports[import_] = 1
+					if s.Name != nil && s.Name.Name == "." {
+						r.hasDotImp = true
+					}
 				}
 			}
 		}

@@ -641,11 +645,12 @@ func (r *reader) computeMethodSets() {
 func (r *reader) cleanupTypes() {
 	for _, t := range r.types {
 		visible := r.isVisible(t.name)
-		if t.decl == nil && (predeclaredTypes[t.name] || t.isEmbedded && visible) {
+		if t.decl == nil && (predeclaredTypes[t.name] || visible && (t.isEmbedded || r.hasDotImp)) {
 			// t.name is a predeclared type (and was not redeclared in this package),
 			// or it was embedded somewhere but its declaration is missing (because
-			// the AST is incomplete): move any associated values, funcs, and methods
-			// back to the top-level so that they are not lost.
+			// the AST is incomplete), or we have a dot-import (and all bets are off):
+			// move any associated values, funcs, and methods back to the top-level so
+			// that they are not lost.
 			// 1) move values
 			r.values = append(r.values, t.values...)
 			// 2) move factory functions
src/go/doc/testdata/issue13742.0.golden (new file)
@@ -0,0 +1,25 @@
+//
+PACKAGE issue13742
+
+IMPORTPATH
+	testdata/issue13742
+
+IMPORTS
+	go/ast
+
+FILENAMES
+	testdata/issue13742.go
+
+FUNCTIONS
+	// Both F0 and G0 should appear as functions.
+	func F0(Node)
+
+	// Both F1 and G1 should appear as functions.
+	func F1(ast.Node)
+
+	//
+	func G0() Node
+
+	//
+	func G1() ast.Node
+

src/go/doc/testdata/issue13742.1.golden and src/go/doc/testdata/issue13742.2.golden (new files, content identical to issue13742.0.golden)

src/go/doc/testdata/issue13742.go (new file)
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue13742
+
+import (
+	"go/ast"
+	. "go/ast"
+)
+
+// Both F0 and G0 should appear as functions.
+func F0(Node)  {}
+func G0() Node { return nil }
+
+// Both F1 and G1 should appear as functions.
+func F1(ast.Node)  {}
+func G1() ast.Node { return nil }
@@ -20,31 +20,37 @@ type Lookup func(path string) (io.ReadCloser, error)
 // For returns an Importer for the given compiler and lookup interface,
 // or nil. Supported compilers are "gc", and "gccgo". If lookup is nil,
 // the default package lookup mechanism for the given compiler is used.
+// BUG(issue13847): For does not support non-nil lookup functions.
 func For(compiler string, lookup Lookup) types.Importer {
 	switch compiler {
 	case "gc":
-		if lookup == nil {
-			return make(gcimports)
+		if lookup != nil {
+			panic("gc importer for custom import path lookup not yet implemented")
 		}
-		panic("gc importer for custom import path lookup not yet implemented")
+
+		return make(gcimports)

 	case "gccgo":
 		if lookup == nil {
-			var inst gccgoimporter.GccgoInstallation
-			if err := inst.InitFromDriver("gccgo"); err != nil {
-				return nil
-			}
-			return &gccgoimports{
-				packages: make(map[string]*types.Package),
-				importer: inst.GetImporter(nil, nil),
-			}
+			panic("gccgo importer for custom import path lookup not yet implemented")
 		}
-		panic("gccgo importer for custom import path lookup not yet implemented")
+
+		var inst gccgoimporter.GccgoInstallation
+		if err := inst.InitFromDriver("gccgo"); err != nil {
+			return nil
+		}
+		return &gccgoimports{
+			packages: make(map[string]*types.Package),
+			importer: inst.GetImporter(nil, nil),
+		}
 	}

 	// compiler not supported
 	return nil
 }

 // Default returns an Importer for the compiler that built the running binary.
+// If available, the result implements types.ImporterFrom.
 func Default() types.Importer {
 	return For(runtime.Compiler, nil)
 }

@@ -54,7 +60,14 @@ func Default() types.Importer {
 type gcimports map[string]*types.Package

 func (m gcimports) Import(path string) (*types.Package, error) {
-	return gcimporter.Import(m, path)
+	return m.ImportFrom(path, "" /* no vendoring */, 0)
+}
+
+func (m gcimports) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
+	if mode != 0 {
+		panic("mode must be 0")
+	}
+	return gcimporter.Import(m, path, srcDir)
 }

 // gccgo support

@@ -65,5 +78,13 @@ type gccgoimports struct {
 }

 func (m *gccgoimports) Import(path string) (*types.Package, error) {
+	return m.ImportFrom(path, "" /* no vendoring */, 0)
+}
+
+func (m *gccgoimports) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
+	if mode != 0 {
+		panic("mode must be 0")
+	}
+	// TODO(gri) pass srcDir
 	return m.importer(m.packages, path)
 }
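Aside (illustrative, not from this commit): with ImportFrom implemented, the importer returned by importer.Default can be handed straight to go/types. A hedged sketch; the source snippet is made up, and the call assumes compiled export data for the imported packages is available, otherwise Check reports an error.

package main

import (
    "fmt"
    "go/ast"
    "go/importer"
    "go/parser"
    "go/token"
    "go/types"
)

func main() {
    const src = `package p
import "fmt"
var _ = fmt.Sprintf`

    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "p.go", src, 0)
    if err != nil {
        panic(err)
    }

    // importer.Default uses the compiler that built this binary; per the
    // comment above it may also implement types.ImporterFrom (vendoring).
    conf := types.Config{Importer: importer.Default()}
    pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
    fmt.Println(pkg, err)
}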
@@ -72,7 +72,7 @@ func BImportData(imports map[string]*types.Package, data []byte, path string) (i
 	// read consts
 	for i := p.int(); i > 0; i-- {
 		name := p.string()
-		typ := p.typ()
+		typ := p.typ(nil)
 		val := p.value()
 		p.declare(types.NewConst(token.NoPos, pkg, name, typ, val))
 	}

@@ -80,14 +80,14 @@ func BImportData(imports map[string]*types.Package, data []byte, path string) (i
 	// read vars
 	for i := p.int(); i > 0; i-- {
 		name := p.string()
-		typ := p.typ()
+		typ := p.typ(nil)
 		p.declare(types.NewVar(token.NoPos, pkg, name, typ))
 	}

 	// read funcs
 	for i := p.int(); i > 0; i-- {
 		name := p.string()
-		sig := p.typ().(*types.Signature)
+		sig := p.typ(nil).(*types.Signature)
 		p.int() // read and discard index of inlined function body
 		p.declare(types.NewFunc(token.NoPos, pkg, name, sig))
 	}

@@ -97,7 +97,7 @@ func BImportData(imports map[string]*types.Package, data []byte, path string) (i
 		// name is parsed as part of named type and the
 		// type object is added to scope via respective
 		// named type
-		_ = p.typ().(*types.Named)
+		_ = p.typ(nil).(*types.Named)
 	}

 	// ignore compiler-specific import data

@@ -190,7 +190,11 @@ type dddSlice struct {
 func (t *dddSlice) Underlying() types.Type { return t }
 func (t *dddSlice) String() string         { return "..." + t.elem.String() }

-func (p *importer) typ() types.Type {
+// parent is the package which declared the type; parent == nil means
+// the package currently imported. The parent package is needed for
+// exported struct fields and interface methods which don't contain
+// explicit package information in the export data.
+func (p *importer) typ(parent *types.Package) types.Type {
 	// if the type was seen before, i is its index (>= 0)
 	i := p.tagOrIndex()
 	if i >= 0 {

@@ -202,18 +206,18 @@ func (p *importer) typ() types.Type {
 	case namedTag:
 		// read type object
 		name := p.string()
-		tpkg := p.pkg()
-		scope := tpkg.Scope()
+		parent = p.pkg()
+		scope := parent.Scope()
 		obj := scope.Lookup(name)

 		// if the object doesn't exist yet, create and insert it
 		if obj == nil {
-			obj = types.NewTypeName(token.NoPos, tpkg, name, nil)
+			obj = types.NewTypeName(token.NoPos, parent, name, nil)
 			scope.Insert(obj)
 		}

 		if _, ok := obj.(*types.TypeName); !ok {
-			panic(fmt.Sprintf("pkg = %s, name = %s => %s", tpkg, name, obj))
+			panic(fmt.Sprintf("pkg = %s, name = %s => %s", parent, name, obj))
 		}

 		// associate new named type with obj if it doesn't exist yet

@@ -224,7 +228,7 @@ func (p *importer) typ() types.Type {
 		p.record(t)

 		// read underlying type
-		t0.SetUnderlying(p.typ())
+		t0.SetUnderlying(p.typ(parent))

 		// interfaces don't have associated methods
 		if _, ok := t0.Underlying().(*types.Interface); ok {

@@ -239,7 +243,7 @@ func (p *importer) typ() types.Type {
 			result, _ := p.paramList()
 			p.int() // read and discard index of inlined function body
 			sig := types.NewSignature(recv.At(0), params, result, isddd)
-			t0.AddMethod(types.NewFunc(token.NoPos, tpkg, name, sig))
+			t0.AddMethod(types.NewFunc(token.NoPos, parent, name, sig))
 		}

 		return t

@@ -249,21 +253,21 @@ func (p *importer) typ() types.Type {
 		p.record(t)

 		n := p.int64()
-		*t = *types.NewArray(p.typ(), n)
+		*t = *types.NewArray(p.typ(parent), n)
 		return t

 	case sliceTag:
 		t := new(types.Slice)
 		p.record(t)

-		*t = *types.NewSlice(p.typ())
+		*t = *types.NewSlice(p.typ(parent))
 		return t

 	case dddTag:
 		t := new(dddSlice)
 		p.record(t)

-		t.elem = p.typ()
+		t.elem = p.typ(parent)
 		return t

 	case structTag:

@@ -274,7 +278,7 @@ func (p *importer) typ() types.Type {
 		fields := make([]*types.Var, n)
 		tags := make([]string, n)
 		for i := range fields {
-			fields[i] = p.field()
+			fields[i] = p.field(parent)
 			tags[i] = p.string()
 		}
 		*t = *types.NewStruct(fields, tags)

@@ -284,7 +288,7 @@ func (p *importer) typ() types.Type {
 		t := new(types.Pointer)
 		p.record(t)

-		*t = *types.NewPointer(p.typ())
+		*t = *types.NewPointer(p.typ(parent))
 		return t

 	case signatureTag:

@@ -312,7 +316,7 @@ func (p *importer) typ() types.Type {
 		// read methods
 		methods := make([]*types.Func, p.int())
 		for i := range methods {
-			pkg, name := p.fieldName()
+			pkg, name := p.fieldName(parent)
 			params, isddd := p.paramList()
 			result, _ := p.paramList()
 			sig := types.NewSignature(nil, params, result, isddd)

@@ -327,8 +331,8 @@ func (p *importer) typ() types.Type {
 		t := new(types.Map)
 		p.record(t)

-		key := p.typ()
-		val := p.typ()
+		key := p.typ(parent)
+		val := p.typ(parent)
 		*t = *types.NewMap(key, val)
 		return t

@@ -348,7 +352,7 @@ func (p *importer) typ() types.Type {
 		default:
 			panic(fmt.Sprintf("unexpected channel dir %d", d))
 		}
-		val := p.typ()
+		val := p.typ(parent)
 		*t = *types.NewChan(dir, val)
 		return t

@@ -357,18 +361,18 @@ func (p *importer) typ() types.Type {
 	}
 }

-func (p *importer) field() *types.Var {
-	pkg, name := p.fieldName()
-	typ := p.typ()
+func (p *importer) field(parent *types.Package) *types.Var {
+	pkg, name := p.fieldName(parent)
+	typ := p.typ(parent)

 	anonymous := false
 	if name == "" {
 		// anonymous field - typ must be T or *T and T must be a type name
 		switch typ := deref(typ).(type) {
 		case *types.Basic: // basic types are named types
+			pkg = nil // // objects defined in Universe scope have no package
 			name = typ.Name()
 		case *types.Named:
-			pkg = p.pkgList[0]
 			name = typ.Obj().Name()
 		default:
 			panic("anonymous field expected")

@@ -379,15 +383,20 @@ func (p *importer) field() *types.Var {
 	return types.NewField(token.NoPos, pkg, name, typ, anonymous)
 }

-func (p *importer) fieldName() (*types.Package, string) {
+func (p *importer) fieldName(parent *types.Package) (*types.Package, string) {
+	pkg := parent
+	if pkg == nil {
+		// use the imported package instead
+		pkg = p.pkgList[0]
+	}
 	name := p.string()
 	if name == "" {
-		return nil, "" // anonymous field
+		return pkg, "" // anonymous
 	}
-	pkg := p.pkgList[0]
 	if name == "?" || name != "_" && !exported(name) {
+		// explicitly qualified field
 		if name == "?" {
-			name = ""
+			name = "" // anonymous
 		}
 		pkg = p.pkg()
 	}

@@ -415,7 +424,7 @@ func (p *importer) paramList() (*types.Tuple, bool) {
 }

 func (p *importer) param(named bool) (*types.Var, bool) {
-	t := p.typ()
+	t := p.typ(nil)
 	td, isddd := t.(*dddSlice)
 	if isddd {
 		t = types.NewSlice(td.elem)
@ -35,11 +35,10 @@ var pkgExts = [...]string{".a", ".o"}
|
||||||
// If no file was found, an empty filename is returned.
|
// If no file was found, an empty filename is returned.
|
||||||
//
|
//
|
||||||
func FindPkg(path, srcDir string) (filename, id string) {
|
func FindPkg(path, srcDir string) (filename, id string) {
|
||||||
if len(path) == 0 {
|
if path == "" {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
id = path
|
|
||||||
var noext string
|
var noext string
|
||||||
switch {
|
switch {
|
||||||
default:
|
default:
|
||||||
|
|
@ -50,6 +49,7 @@ func FindPkg(path, srcDir string) (filename, id string) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
noext = strings.TrimSuffix(bp.PkgObj, ".a")
|
noext = strings.TrimSuffix(bp.PkgObj, ".a")
|
||||||
|
id = bp.ImportPath
|
||||||
|
|
||||||
case build.IsLocalImport(path):
|
case build.IsLocalImport(path):
|
||||||
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
|
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
|
||||||
|
|
@ -61,6 +61,13 @@ func FindPkg(path, srcDir string) (filename, id string) {
|
||||||
// does not support absolute imports
|
// does not support absolute imports
|
||||||
// "/x" -> "/x.ext", "/x"
|
// "/x" -> "/x.ext", "/x"
|
||||||
noext = path
|
noext = path
|
||||||
|
id = path
|
||||||
|
}
|
||||||
|
|
||||||
|
if false { // for debugging
|
||||||
|
if path != id {
|
||||||
|
fmt.Printf("%s -> %s\n", path, id)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// try extensions
|
// try extensions
|
||||||
|
|
@ -107,27 +114,16 @@ func ImportData(packages map[string]*types.Package, filename, id string, data io
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Import imports a gc-generated package given its import path, adds the
|
// Import imports a gc-generated package given its import path and srcDir, adds
|
||||||
// corresponding package object to the packages map, and returns the object.
|
// the corresponding package object to the packages map, and returns the object.
|
||||||
// Local import paths are interpreted relative to the current working directory.
|
|
||||||
// The packages map must contain all packages already imported.
|
// The packages map must contain all packages already imported.
|
||||||
//
|
//
|
||||||
func Import(packages map[string]*types.Package, path string) (pkg *types.Package, err error) {
|
func Import(packages map[string]*types.Package, path, srcDir string) (pkg *types.Package, err error) {
|
||||||
// package "unsafe" is handled by the type checker
|
|
||||||
if path == "unsafe" {
|
|
||||||
panic(`gcimporter.Import called for package "unsafe"`)
|
|
||||||
}
|
|
||||||
|
|
||||||
srcDir := "."
|
|
||||||
if build.IsLocalImport(path) {
|
|
||||||
srcDir, err = os.Getwd()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
filename, id := FindPkg(path, srcDir)
|
filename, id := FindPkg(path, srcDir)
|
||||||
if filename == "" {
|
if filename == "" {
|
||||||
|
if path == "unsafe" {
|
||||||
|
return types.Unsafe, nil
|
||||||
|
}
|
||||||
err = fmt.Errorf("can't find import: %s", id)
|
err = fmt.Errorf("can't find import: %s", id)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@@ -417,11 +413,11 @@ func (p *parser) parseBasicType() types.Type {
 
 // ArrayType = "[" int_lit "]" Type .
 //
-func (p *parser) parseArrayType() types.Type {
+func (p *parser) parseArrayType(parent *types.Package) types.Type {
 	// "[" already consumed and lookahead known not to be "]"
 	lit := p.expect(scanner.Int)
 	p.expect(']')
-	elem := p.parseType()
+	elem := p.parseType(parent)
 	n, err := strconv.ParseInt(lit, 10, 64)
 	if err != nil {
 		p.error(err)
@@ -431,35 +427,43 @@ func (p *parser) parseArrayType() types.Type {
 
 // MapType = "map" "[" Type "]" Type .
 //
-func (p *parser) parseMapType() types.Type {
+func (p *parser) parseMapType(parent *types.Package) types.Type {
 	p.expectKeyword("map")
 	p.expect('[')
-	key := p.parseType()
+	key := p.parseType(parent)
 	p.expect(']')
-	elem := p.parseType()
+	elem := p.parseType(parent)
 	return types.NewMap(key, elem)
 }
 
 // Name = identifier | "?" | QualifiedName .
 //
-// For unqualified names, the returned package is the imported package.
+// For unqualified and anonymous names, the returned package is the parent
+// package unless parent == nil, in which case the returned package is the
+// package being imported. (The parent package is not nil if the the name
+// is an unqualified struct field or interface method name belonging to a
+// type declared in another package.)
+//
 // For qualified names, the returned package is nil (and not created if
 // it doesn't exist yet) unless materializePkg is set (which creates an
-// unnamed package). In the latter case, a subequent import clause is
-// expected to provide a name for the package.
+// unnamed package with valid package path). In the latter case, a
+// subequent import clause is expected to provide a name for the package.
 //
-func (p *parser) parseName(materializePkg bool) (pkg *types.Package, name string) {
+func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
+	pkg = parent
+	if pkg == nil {
+		pkg = p.sharedPkgs[p.id]
+	}
 	switch p.tok {
 	case scanner.Ident:
-		pkg = p.sharedPkgs[p.id]
 		name = p.lit
 		p.next()
 	case '?':
 		// anonymous
-		pkg = p.sharedPkgs[p.id]
 		p.next()
 	case '@':
 		// exported name prefixed with package path
+		pkg = nil
 		var id string
 		id, name = p.parseQualifiedName()
 		if materializePkg {
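
Threading the parent package through the parser is what keeps an unqualified field or method name attached to the package of the type it belongs to, rather than to the package whose export data happens to be read. The effect can be observed at the go/types level roughly as follows; this mirrors the new tests added later in this commit and assumes export data for net/http is available to the default importer.

package main

import (
	"fmt"
	"go/importer"
	"go/types"
)

func main() {
	pkg, err := importer.Default().Import("net/http")
	if err != nil {
		fmt.Println("import failed:", err)
		return
	}
	obj := pkg.Scope().Lookup("Server")
	if obj == nil {
		fmt.Println("net/http.Server not found")
		return
	}
	server := obj.Type().(*types.Named)

	// The method object must report net/http as its package, i.e. the
	// package of the type it belongs to, not the package that happened
	// to trigger the import of its export data.
	m, _, _ := types.LookupFieldOrMethod(server, true, nil, "ListenAndServe")
	fmt.Println(m.Pkg().Path()) // net/http
}
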
@@ -480,15 +484,15 @@ func deref(typ types.Type) types.Type {
 
 // Field = Name Type [ string_lit ] .
 //
-func (p *parser) parseField() (*types.Var, string) {
-	pkg, name := p.parseName(true)
-	typ := p.parseType()
+func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
+	pkg, name := p.parseName(parent, true)
+	typ := p.parseType(parent)
 	anonymous := false
 	if name == "" {
 		// anonymous field - typ must be T or *T and T must be a type name
 		switch typ := deref(typ).(type) {
 		case *types.Basic: // basic types are named types
-			pkg = nil
+			pkg = nil // objects defined in Universe scope have no package
 			name = typ.Name()
 		case *types.Named:
 			name = typ.Obj().Name()
@@ -512,7 +516,7 @@ func (p *parser) parseField() (*types.Var, string) {
 // StructType = "struct" "{" [ FieldList ] "}" .
 // FieldList = Field { ";" Field } .
 //
-func (p *parser) parseStructType() types.Type {
+func (p *parser) parseStructType(parent *types.Package) types.Type {
 	var fields []*types.Var
 	var tags []string
 
@@ -522,7 +526,7 @@ func (p *parser) parseStructType() types.Type {
 		if i > 0 {
 			p.expect(';')
 		}
-		fld, tag := p.parseField()
+		fld, tag := p.parseField(parent)
 		if tag != "" && tags == nil {
 			tags = make([]string, i)
 		}
@@ -539,7 +543,7 @@ func (p *parser) parseStructType() types.Type {
 // Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
 //
 func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
-	_, name := p.parseName(false)
+	_, name := p.parseName(nil, false)
 	// remove gc-specific parameter numbering
 	if i := strings.Index(name, "·"); i >= 0 {
 		name = name[:i]
@@ -548,7 +552,7 @@ func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
 		p.expectSpecial("...")
 		isVariadic = true
 	}
-	typ := p.parseType()
+	typ := p.parseType(nil)
 	if isVariadic {
 		typ = types.NewSlice(typ)
 	}
@@ -611,7 +615,7 @@ func (p *parser) parseSignature(recv *types.Var) *types.Signature {
 // by the compiler and thus embedded interfaces are never
 // visible in the export data.
 //
-func (p *parser) parseInterfaceType() types.Type {
+func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
 	var methods []*types.Func
 
 	p.expectKeyword("interface")
@@ -620,7 +624,7 @@ func (p *parser) parseInterfaceType() types.Type {
 		if i > 0 {
 			p.expect(';')
 		}
-		pkg, name := p.parseName(true)
+		pkg, name := p.parseName(parent, true)
 		sig := p.parseSignature(nil)
 		methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
 	}
@@ -633,7 +637,7 @@ func (p *parser) parseInterfaceType() types.Type {
 
 // ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
 //
-func (p *parser) parseChanType() types.Type {
+func (p *parser) parseChanType(parent *types.Package) types.Type {
 	dir := types.SendRecv
 	if p.tok == scanner.Ident {
 		p.expectKeyword("chan")
@@ -646,7 +650,7 @@ func (p *parser) parseChanType() types.Type {
 		p.expectKeyword("chan")
 		dir = types.RecvOnly
 	}
-	elem := p.parseType()
+	elem := p.parseType(parent)
 	return types.NewChan(dir, elem)
 }
 
@@ -661,24 +665,24 @@ func (p *parser) parseChanType() types.Type {
 // PointerType = "*" Type .
 // FuncType = "func" Signature .
 //
-func (p *parser) parseType() types.Type {
+func (p *parser) parseType(parent *types.Package) types.Type {
 	switch p.tok {
 	case scanner.Ident:
 		switch p.lit {
 		default:
 			return p.parseBasicType()
 		case "struct":
-			return p.parseStructType()
+			return p.parseStructType(parent)
 		case "func":
 			// FuncType
 			p.next()
 			return p.parseSignature(nil)
 		case "interface":
-			return p.parseInterfaceType()
+			return p.parseInterfaceType(parent)
 		case "map":
-			return p.parseMapType()
+			return p.parseMapType(parent)
 		case "chan":
-			return p.parseChanType()
+			return p.parseChanType(parent)
 		}
 	case '@':
 		// TypeName
@@ -689,19 +693,19 @@ func (p *parser) parseType() types.Type {
 		if p.tok == ']' {
 			// SliceType
 			p.next()
-			return types.NewSlice(p.parseType())
+			return types.NewSlice(p.parseType(parent))
 		}
-		return p.parseArrayType()
+		return p.parseArrayType(parent)
 	case '*':
 		// PointerType
 		p.next()
-		return types.NewPointer(p.parseType())
+		return types.NewPointer(p.parseType(parent))
 	case '<':
-		return p.parseChanType()
+		return p.parseChanType(parent)
 	case '(':
 		// "(" Type ")"
 		p.next()
-		typ := p.parseType()
+		typ := p.parseType(parent)
 		p.expect(')')
 		return typ
 	}
@@ -783,7 +787,8 @@ func (p *parser) parseConstDecl() {
 
 	var typ0 types.Type
 	if p.tok != '=' {
-		typ0 = p.parseType()
+		// constant types are never structured - no need for parent type
+		typ0 = p.parseType(nil)
 	}
 
 	p.expect('=')
@@ -857,7 +862,7 @@ func (p *parser) parseTypeDecl() {
 	// structure, but throw it away if the object already has a type.
 	// This ensures that all imports refer to the same type object for
 	// a given type declaration.
-	typ := p.parseType()
+	typ := p.parseType(pkg)
 
 	if name := obj.Type().(*types.Named); name.Underlying() == nil {
 		name.SetUnderlying(typ)
@@ -869,7 +874,7 @@ func (p *parser) parseTypeDecl() {
 func (p *parser) parseVarDecl() {
 	p.expectKeyword("var")
 	pkg, name := p.parseExportedName()
-	typ := p.parseType()
+	typ := p.parseType(pkg)
 	pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
 }
 
@@ -905,7 +910,7 @@ func (p *parser) parseMethodDecl() {
 	base := deref(recv.Type()).(*types.Named)
 
 	// parse method name, signature, and possibly inlined body
-	_, name := p.parseName(false)
+	_, name := p.parseName(nil, false)
 	sig := p.parseFunc(recv)
 
 	// methods always belong to the same package as the base type object
@@ -60,9 +60,9 @@ func compileNewExport(t *testing.T, dirname, filename string) string {
 	return filepath.Join(dirname, filename[:len(filename)-2]+"o")
 }
 
-func testPath(t *testing.T, path string) *types.Package {
+func testPath(t *testing.T, path, srcDir string) *types.Package {
 	t0 := time.Now()
-	pkg, err := Import(make(map[string]*types.Package), path)
+	pkg, err := Import(make(map[string]*types.Package), path, srcDir)
 	if err != nil {
 		t.Errorf("testPath(%s): %s", path, err)
 		return nil
@@ -90,7 +90,7 @@ func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
 		for _, ext := range pkgExts {
 			if strings.HasSuffix(f.Name(), ext) {
 				name := f.Name()[0 : len(f.Name())-len(ext)] // remove extension
-				if testPath(t, filepath.Join(dir, name)) != nil {
+				if testPath(t, filepath.Join(dir, name), dir) != nil {
 					nimports++
 				}
 			}
@@ -113,7 +113,7 @@ func TestImportTestdata(t *testing.T) {
 		defer os.Remove(outFn)
 	}
 
-	if pkg := testPath(t, "./testdata/exports"); pkg != nil {
+	if pkg := testPath(t, "./testdata/exports", "."); pkg != nil {
 		// The package's Imports list must include all packages
 		// explicitly imported by exports.go, plus all packages
 		// referenced indirectly via exported objects in exports.go.
@@ -143,7 +143,7 @@ func TestImportTestdataNewExport(t *testing.T) {
 		defer os.Remove(outFn)
 	}
 
-	if pkg := testPath(t, "./testdata/exports"); pkg != nil {
+	if pkg := testPath(t, "./testdata/exports", "."); pkg != nil {
 		// The package's Imports list must include all packages
 		// explicitly imported by exports.go, plus all packages
 		// referenced indirectly via exported objects in exports.go.
@@ -200,7 +200,7 @@ func TestImportedTypes(t *testing.T) {
 		importPath := s[0]
 		objName := s[1]
 
-		pkg, err := Import(make(map[string]*types.Package), importPath)
+		pkg, err := Import(make(map[string]*types.Package), importPath, ".")
 		if err != nil {
 			t.Error(err)
 			continue
@@ -228,7 +228,7 @@ func TestIssue5815(t *testing.T) {
 		return
 	}
 
-	pkg, err := Import(make(map[string]*types.Package), "strings")
+	pkg, err := Import(make(map[string]*types.Package), "strings", ".")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -262,7 +262,7 @@ func TestCorrectMethodPackage(t *testing.T) {
 	}
 
 	imports := make(map[string]*types.Package)
-	_, err := Import(imports, "net/http")
+	_, err := Import(imports, "net/http", ".")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -299,7 +299,7 @@ func TestIssue13566(t *testing.T) {
 	}
 
 	// import must succeed (test for issue at hand)
-	pkg, err := Import(make(map[string]*types.Package), "./testdata/b")
+	pkg, err := Import(make(map[string]*types.Package), "./testdata/b", ".")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -311,3 +311,53 @@ func TestIssue13566(t *testing.T) {
 		}
 	}
 }
+
+func TestIssue13898(t *testing.T) {
+	skipSpecialPlatforms(t)
+
+	// This package only handles gc export data.
+	if runtime.Compiler != "gc" {
+		t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+		return
+	}
+
+	// import go/internal/gcimporter which imports go/types partially
+	imports := make(map[string]*types.Package)
+	_, err := Import(imports, "go/internal/gcimporter", ".")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// look for go/types package
+	var goTypesPkg *types.Package
+	for path, pkg := range imports {
+		if path == "go/types" {
+			goTypesPkg = pkg
+			break
+		}
+	}
+	if goTypesPkg == nil {
+		t.Fatal("go/types not found")
+	}
+
+	// look for go/types.Object type
+	obj := goTypesPkg.Scope().Lookup("Object")
+	if obj == nil {
+		t.Fatal("go/types.Object not found")
+	}
+	typ, ok := obj.Type().(*types.Named)
+	if !ok {
+		t.Fatalf("go/types.Object type is %v; wanted named type", typ)
+	}
+
+	// lookup go/types.Object.Pkg method
+	m, _, _ := types.LookupFieldOrMethod(typ, false, nil, "Pkg")
+	if m == nil {
+		t.Fatal("go/types.Object.Pkg not found")
+	}
+
+	// the method must belong to go/types
+	if m.Pkg().Path() != "go/types" {
+		t.Fatalf("found %v; want go/types", m.Pkg())
+	}
+}
@@ -51,16 +51,42 @@ func (err Error) Error() string {
 	return fmt.Sprintf("%s: %s", err.Fset.Position(err.Pos), err.Msg)
 }
 
-// An importer resolves import paths to Packages.
-// See go/importer for existing implementations.
+// An Importer resolves import paths to Packages.
+//
+// CAUTION: This interface does not support the import of locally
+// vendored packages. See https://golang.org/s/go15vendor.
+// If possible, external implementations should implement ImporterFrom.
 type Importer interface {
 	// Import returns the imported package for the given import
 	// path, or an error if the package couldn't be imported.
-	// Import is responsible for returning the same package for
-	// matching import paths.
+	// Two calls to Import with the same path return the same
+	// package.
 	Import(path string) (*Package, error)
 }
 
+// ImportMode is reserved for future use.
+type ImportMode int
+
+// An ImporterFrom resolves import paths to packages; it
+// supports vendoring per https://golang.org/s/go15vendor.
+// Use go/importer to obtain an ImporterFrom implementation.
+type ImporterFrom interface {
+	// Importer is present for backward-compatibility. Calling
+	// Import(path) is the same as calling ImportFrom(path, "", 0);
+	// i.e., locally vendored packages may not be found.
+	// The types package does not call Import if an ImporterFrom
+	// is present.
+	Importer
+
+	// ImportFrom returns the imported package for the given import
+	// path when imported by the package in srcDir, or an error
+	// if the package couldn't be imported. The mode value must
+	// be 0; it is reserved for future use.
+	// Two calls to ImportFrom with the same path and srcDir return
+	// the same package.
+	ImportFrom(path, srcDir string, mode ImportMode) (*Package, error)
+}
+
 // A Config specifies the configuration for type checking.
 // The zero value for Config is a ready-to-use default configuration.
 type Config struct {
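
This hunk is the exported API change in go/types: ImportMode and the ImporterFrom interface, which receives the importing package's directory so vendored packages (golang.org/s/go15vendor) can be resolved. A small usage sketch, assuming the default gc importer implements ImporterFrom and can find compiled packages:

package main

import (
	"fmt"
	"go/importer"
	"go/types"
)

func main() {
	imp := importer.Default()

	// Prefer ImporterFrom when available so that imports can be resolved
	// relative to the importing package's directory (vendoring).
	if from, ok := imp.(types.ImporterFrom); ok {
		pkg, err := from.ImportFrom("fmt", ".", 0)
		if err != nil {
			fmt.Println("ImportFrom failed:", err)
			return
		}
		fmt.Println("imported", pkg.Path())
		return
	}

	pkg, err := imp.Import("fmt")
	if err != nil {
		fmt.Println("Import failed:", err)
		return
	}
	fmt.Println("imported", pkg.Path())
}
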
@@ -86,9 +112,12 @@ type Config struct {
 	// error found.
 	Error func(err error)
 
-	// Importer is called for each import declaration except when
-	// importing package "unsafe". An error is reported if an
-	// importer is needed but none was installed.
+	// An importer is used to import packages referred to from
+	// import declarations.
+	// If the installed importer implements ImporterFrom, the type
+	// checker calls ImportFrom instead of Import.
+	// The type checker reports an error if an importer is needed
+	// but none was installed.
 	Importer Importer
 
 	// If Sizes != nil, it provides the sizing functions for package unsafe.
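
The Config documentation now states the dispatch rule: if Config.Importer also implements ImporterFrom, the type checker calls ImportFrom with the source directory of the file being checked, otherwise it falls back to Import. A minimal sketch of wiring an importer into a type-checking run follows; the file name and source are made up, and the same importer-availability caveat as above applies.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

import "fmt"

func Hello() { fmt.Println("hello") }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// The checker consults conf.Importer for the "fmt" import; if that
	// importer also implements ImporterFrom, ImportFrom is called with
	// the directory of p.go as srcDir.
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg.Path(), "type-checked OK")
}
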
@@ -11,6 +11,7 @@ import (
 	"go/ast"
 	"go/importer"
 	"go/parser"
+	"internal/testenv"
 	"sort"
 	"strings"
 	"testing"
@@ -204,3 +205,90 @@ L7 uses var z int`
 		t.Errorf("Unexpected defs/uses\ngot:\n%s\nwant:\n%s", got, want)
 	}
 }
+
+// This tests that the package associated with the types.Object.Pkg method
+// is the type's package independent of the order in which the imports are
+// listed in the sources src1, src2 below.
+// The actual issue is in go/internal/gcimporter which has a corresponding
+// test; we leave this test here to verify correct behavior at the go/types
+// level.
+func TestIssue13898(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+
+	const src0 = `
+package main
+
+import "go/types"
+
+func main() {
+	var info types.Info
+	for _, obj := range info.Uses {
+		_ = obj.Pkg()
+	}
+}
+`
+	// like src0, but also imports go/importer
+	const src1 = `
+package main
+
+import (
+	"go/types"
+	_ "go/importer"
+)
+
+func main() {
+	var info types.Info
+	for _, obj := range info.Uses {
+		_ = obj.Pkg()
+	}
+}
+`
+	// like src1 but with different import order
+	// (used to fail with this issue)
+	const src2 = `
+package main
+
+import (
+	_ "go/importer"
+	"go/types"
+)
+
+func main() {
+	var info types.Info
+	for _, obj := range info.Uses {
+		_ = obj.Pkg()
+	}
+}
+`
+	f := func(test, src string) {
+		f, err := parser.ParseFile(fset, "", src, 0)
+		if err != nil {
+			t.Fatal(err)
+		}
+		cfg := Config{Importer: importer.Default()}
+		info := Info{Uses: make(map[*ast.Ident]Object)}
+		_, err = cfg.Check("main", fset, []*ast.File{f}, &info)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		var pkg *Package
+		count := 0
+		for id, obj := range info.Uses {
+			if id.Name == "Pkg" {
+				pkg = obj.Pkg()
+				count++
+			}
+		}
+		if count != 1 {
+			t.Fatalf("%s: got %d entries named Pkg; want 1", test, count)
+		}
+		if pkg.Name() != "types" {
+			t.Fatalf("%s: got %v; want package types", test, pkg)
+		}
+	}
+
+	f("src0", src0)
+	f("src1", src1)
+	f("src2", src2)
+}
@@ -9,7 +9,6 @@ import (
 	"go/ast"
 	"go/constant"
 	"go/token"
-	pathLib "path"
 	"strconv"
 	"strings"
 	"unicode"
@@ -134,6 +133,20 @@ func (check *Checker) collectObjects() {
 		pkgImports[imp] = true
 	}
 
+	// srcDir is the directory used by the Importer to look up packages.
+	// The typechecker itself doesn't need this information so it is not
+	// explicitly provided. Instead, we extract it from position info of
+	// the source files as needed.
+	// This is the only place where the type-checker (just the importer)
+	// needs to know the actual source location of a file.
+	// TODO(gri) can we come up with a better API instead?
+	var srcDir string
+	if len(check.files) > 0 {
+		// FileName may be "" (typically for tests) in which case
+		// we get "." as the srcDir which is what we would want.
+		srcDir = dir(check.fset.Position(check.files[0].Name.Pos()).Filename)
+	}
+
 	for fileNo, file := range check.files {
 		// The package identifier denotes the current package,
 		// but there is no corresponding package object.
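
The resolver derives srcDir from the position of the first file's package name instead of taking it as a parameter. Outside the type checker the same information can be recovered from a token.FileSet; in the sketch below the /tmp/demo path is made up, and filepath.Dir stands in for the resolver's own dir helper, which exists only to avoid a path/filepath dependency.

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"path/filepath"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "/tmp/demo/p/p.go", "package p", 0)
	if err != nil {
		panic(err)
	}
	// The resolver takes the position of the first file's package name
	// and keeps only the directory part.
	filename := fset.Position(f.Name.Pos()).Filename
	fmt.Println(filepath.Dir(filename)) // /tmp/demo/p
}
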
@@ -170,17 +183,20 @@ func (check *Checker) collectObjects() {
 							// TODO(gri) shouldn't create a new one each time
 							imp = NewPackage("C", "C")
 							imp.fake = true
-						} else if path == "unsafe" {
-							// package "unsafe" is known to the language
-							imp = Unsafe
 						} else {
-							if importer := check.conf.Importer; importer != nil {
+							// ordinary import
+							if importer := check.conf.Importer; importer == nil {
+								err = fmt.Errorf("Config.Importer not installed")
+							} else if importerFrom, ok := importer.(ImporterFrom); ok {
+								imp, err = importerFrom.ImportFrom(path, srcDir, 0)
+								if imp == nil && err == nil {
+									err = fmt.Errorf("Config.Importer.ImportFrom(%s, %s, 0) returned nil but no error", path, pkg.path)
+								}
+							} else {
 								imp, err = importer.Import(path)
 								if imp == nil && err == nil {
 									err = fmt.Errorf("Config.Importer.Import(%s) returned nil but no error", path)
 								}
-							} else {
-								err = fmt.Errorf("Config.Importer not installed")
 							}
 							if err != nil {
 								check.errorf(s.Path.Pos(), "could not import %s (%s)", path, err)
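
The rewritten dispatch prefers ImporterFrom and hands it the srcDir computed above; the special case for "unsafe" is dropped here, matching the importer change earlier in this commit where a failed FindPkg lookup for "unsafe" yields types.Unsafe. One way to observe which srcDir the checker passes is to wrap the default importer; loggingImporter below is a hypothetical illustration, not part of the change, and it assumes the default importer implements ImporterFrom and can find compiled packages.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

// loggingImporter wraps an ImporterFrom and prints the srcDir the type
// checker passes for every import.
type loggingImporter struct {
	next types.ImporterFrom
}

func (l *loggingImporter) Import(path string) (*types.Package, error) {
	return l.ImportFrom(path, "", 0)
}

func (l *loggingImporter) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) {
	fmt.Printf("ImportFrom(%q, %q)\n", path, srcDir)
	return l.next.ImportFrom(path, srcDir, mode)
}

const src = `package p

import "fmt"

var _ = fmt.Sprint
`

func main() {
	base, ok := importer.Default().(types.ImporterFrom)
	if !ok {
		fmt.Println("default importer does not implement ImporterFrom")
		return
	}
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "/tmp/p/p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: &loggingImporter{next: base}}
	if _, err := conf.Check("p", fset, []*ast.File{f}, nil); err != nil {
		panic(err)
	}
	// Expected to print: ImportFrom("fmt", "/tmp/p")
}
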
@@ -435,7 +451,7 @@ func (check *Checker) unusedImports() {
 				// since _ identifiers are not entered into scopes.
 				if !obj.used {
 					path := obj.imported.path
-					base := pathLib.Base(path)
+					base := pkgName(path)
 					if obj.name == base {
 						check.softErrorf(obj.pos, "%q imported but not used", path)
 					} else {
@@ -453,3 +469,25 @@ func (check *Checker) unusedImports() {
 		}
 	}
 }
+
+// pkgName returns the package name (last element) of an import path.
+func pkgName(path string) string {
+	if i := strings.LastIndex(path, "/"); i >= 0 {
+		path = path[i+1:]
+	}
+	return path
+}
+
+// dir makes a good-faith attempt to return the directory
+// portion of path. If path is empty, the result is ".".
+// (Per the go/build package dependency tests, we cannot import
+// path/filepath and simply use filepath.Dir.)
+func dir(path string) string {
+	if i := strings.LastIndexAny(path, "/\\"); i >= 0 {
+		path = path[:i]
+	}
+	if path == "" {
+		path = "."
+	}
+	return path
+}
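
pkgName and dir replace the earlier pathLib.Base call and keep go/types free of a path/filepath dependency. Their behavior is easy to check with local copies; the program below simply duplicates the two helpers for illustration.

package main

import (
	"fmt"
	"strings"
)

// Local copies of the two unexported helpers, for illustration only.
func pkgName(path string) string {
	if i := strings.LastIndex(path, "/"); i >= 0 {
		path = path[i+1:]
	}
	return path
}

func dir(path string) string {
	if i := strings.LastIndexAny(path, "/\\"); i >= 0 {
		path = path[:i]
	}
	if path == "" {
		path = "."
	}
	return path
}

func main() {
	fmt.Println(pkgName("net/http"))      // http
	fmt.Println(dir("src/go/types/x.go")) // src/go/types
	fmt.Println(dir(""))                  // .
}
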
Some files were not shown because too many files have changed in this diff.