[dev.boringcrypto] all: merge master into dev.boringcrypto

Change-Id: Ic5f71c04f08c03319c043f35be501875adb0a3b0
Chressie Himpel 2022-04-27 20:09:28 +02:00
commit ec7f5165dd
204 changed files with 6590 additions and 4889 deletions


@ -35,7 +35,7 @@ Go is the work of thousands of contributors. We appreciate your help!
To contribute, please read the contribution guidelines at https://go.dev/doc/contribute.
Note that the Go project uses the issue tracker for bug reports and
-proposals only. See https://golang.org/wiki/Questions for a list of
+proposals only. See https://go.dev/wiki/Questions for a list of
places to ask questions about the Go language.
[rf]: https://reneefrench.blogspot.com/


@ -2,12 +2,12 @@
## Supported Versions
-We support the past two Go releases (for example, Go 1.12.x and Go 1.13.x).
+We support the past two Go releases (for example, Go 1.17.x and Go 1.18.x when Go 1.18.x is the latest stable release).
-See https://golang.org/wiki/Go-Release-Cycle and in particular the
-[Release Maintenance](https://github.com/golang/go/wiki/Go-Release-Cycle#release-maintenance)
+See https://go.dev/wiki/Go-Release-Cycle and in particular the
+[Release Maintenance](https://go.dev/wiki/Go-Release-Cycle#release-maintenance)
part of that page.
## Reporting a Vulnerability
-See https://golang.org/security for how to report a vulnerability.
+See https://go.dev/security for how to report a vulnerability.

api/next/30715.txt (new file, 3 lines)

@ -0,0 +1,3 @@
pkg net/http, type MaxBytesError struct #30715
pkg net/http, type MaxBytesError struct, Limit int64 #30715
pkg net/http, method (*MaxBytesError) Error() string #30715

api/next/50599.txt (new file, 1 line)

@ -0,0 +1 @@
pkg os/exec, method (*Cmd) Environ() []string #50599

api/next/51684.txt (new file, 2 lines)

@ -0,0 +1,2 @@
pkg regexp/syntax, const ErrNestingDepth = "expression nests too deeply" #51684
pkg regexp/syntax, const ErrNestingDepth ErrorCode #51684
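A rough sketch of how the new error surfaces; the exact nesting limit is an internal detail and is only assumed here to be well under 2000:

package main

import (
	"fmt"
	"regexp/syntax"
	"strings"
)

func main() {
	// A deeply nested expression should now fail with ErrNestingDepth
	// instead of recursing without bound.
	expr := strings.Repeat("(", 2000) + "a" + strings.Repeat(")", 2000)
	_, err := syntax.Parse(expr, syntax.Perl)
	if perr, ok := err.(*syntax.Error); ok {
		fmt.Println(perr.Code == syntax.ErrNestingDepth) // expected: true
	}
}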


@ -1,3 +0,0 @@
pkg regexp/syntax, const ErrInvalidDepth = "invalid nesting depth" #0
pkg regexp/syntax, const ErrInvalidDepth ErrorCode #0


@ -92,6 +92,16 @@ Do not send CLs removing the interior tags from such phrases.
TODO: complete this section
</p>
<dl id="crypto/tls"><dt><a href="/pkg/crypto/tls/">crypto/tls</a></dt>
<dd>
<p><!-- CL 400974 -->
The <code>tls10default</code> <code>GODEBUG</code> option has been
removed. It is still possible to enable TLS 1.0 client-side by setting
<code>Config.MinVersion</code>.
</p>
</dd>
</dl><!-- crypto/tls -->
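A minimal sketch of the remaining opt-in path described above; the dialLegacy helper is illustrative:

package main

import "crypto/tls"

// dialLegacy is an illustrative helper: with the tls10default GODEBUG option
// gone, a client that still needs TLS 1.0 must opt in via Config.MinVersion.
func dialLegacy(addr string) (*tls.Conn, error) {
	cfg := &tls.Config{
		MinVersion: tls.VersionTLS10,
	}
	return tls.Dial("tcp", addr, cfg)
}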
<dl id="image/draw"><dt><a href="/pkg/image/draw/">image/draw</a></dt>
<dd>
<p><!-- CL 396795 -->
@ -132,6 +142,21 @@ Do not send CLs removing the interior tags from such phrases.
</dd>
</dl><!-- net -->
<dl id="os/exec"><dt><a href="/pkg/os/exec/">os/exec</a></dt>
<dd><!-- https://go.dev/issue/50599 -->
<p>
An <code>exec.Cmd</code> with a non-empty <code>Dir</code> and a
nil <code>Env</code> now implicitly sets the <code>PWD</code> environment
variable for the subprocess to match <code>Dir</code>.
</p>
<p>
The new method <code>(*exec.Cmd).Environ</code> reports the
environment that would be used to run the command, including the
aforementioned <code>PWD</code> variable.
</p>
</dd>
</dl> <!-- os/exec -->
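A short sketch of both behaviors noted above; the path and output are illustrative:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("pwd")
	cmd.Dir = "/tmp" // non-empty Dir with a nil Env
	// Environ reports the environment the command would run with,
	// which now includes PWD=/tmp for the child process.
	fmt.Println(cmd.Environ())
	out, _ := cmd.Output()
	fmt.Printf("%s", out)
}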
<dl id="runtime"><dt><a href="/pkg/runtime/">runtime</a></dt>
<dd>
<p><!-- https://go.dev/issue/51461 -->


@ -1,6 +1,6 @@
<!--{
"Title": "The Go Programming Language Specification",
-"Subtitle": "Version of March 30, 2022",
+"Subtitle": "Version of April 19, 2022",
"Path": "/ref/spec"
}-->
@ -1278,7 +1278,8 @@ then the <code>File</code> interface is implemented by both <code>S1</code> and
<p>
Every type that is a member of the type set of an interface implements that interface.
Any given type may implement several distinct interfaces.
-For instance, all types implement the <i>empty interface</i> which stands for the set of all types:
+For instance, all types implement the <i>empty interface</i> which stands for the set
+of all (non-interface) types:
</p>
<pre>
@ -1380,7 +1381,7 @@ definition of an interface's type set as follows:
of its interface elements.
</li>
-<li>The type set of a method specification is the set of types
+<li>The type set of a method specification is the set of all non-interface types
whose method sets include that method.
</li>
@ -1389,7 +1390,7 @@ definition of an interface's type set as follows:
</li>
<li>The type set of a term of the form <code>~T</code>
-is the set of types whose underlying type is <code>T</code>.
+is the set of all types whose underlying type is <code>T</code>.
</li>
<li>The type set of a <i>union</i> of terms
@ -1398,6 +1399,15 @@ definition of an interface's type set as follows:
</li>
</ul>
<p>
The quantification "the set of all non-interface types" refers not just to all (non-interface)
types declared in the program at hand, but all possible types in all possible programs, and
hence is infinite.
Similarly, given the set of all non-interface types that implement a particular method, the
intersection of the method sets of those types will contain exactly that method, even if all
types in the program at hand always pair that method with another method.
</p>
<p>
By construction, an interface's type set never contains an interface type.
</p>
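As an illustration of the wording above, a small constraint combining a tilde term and a method specification (names are illustrative):

// The type set of Stringish is the intersection of
//   - all types whose underlying type is int (the ~int term), and
//   - all non-interface types whose method set includes String() string.
type Stringish interface {
	~int
	String() string
}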


@ -3,8 +3,7 @@
// license that can be found in the LICENSE file.
#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
#include "_cgo_export.h"
void
@ -31,32 +30,10 @@ IntoC(void)
BackIntoGo();
}
-#ifdef WIN32
-#include <windows.h>
-long long
-mysleep(int seconds) {
-long long st = GetTickCount();
-Sleep(1000 * seconds);
-return st;
-}
-#else
-#include <sys/time.h>
-long long
-mysleep(int seconds) {
-long long st;
-struct timeval tv;
-gettimeofday(&tv, NULL);
-st = tv.tv_sec * 1000 + tv.tv_usec / 1000;
-sleep(seconds);
-return st;
-}
-#endif
-long long
-twoSleep(int n)
+void
+Issue1560InC(void)
{
-BackgroundSleep(n);
-return mysleep(n);
+Issue1560FromC();
}
void


@ -11,6 +11,7 @@ import "testing"
// These wrappers are here for gotest to find.
func Test1328(t *testing.T) { test1328(t) }
+func Test1560(t *testing.T) { test1560(t) }
func Test1635(t *testing.T) { test1635(t) }
func Test3250(t *testing.T) { test3250(t) }
func Test3729(t *testing.T) { test3729(t) }
@ -89,7 +90,6 @@ func TestLibgcc(t *testing.T) { testLibgcc(t) }
func TestMultipleAssign(t *testing.T) { testMultipleAssign(t) }
func TestNaming(t *testing.T) { testNaming(t) }
func TestPanicFromC(t *testing.T) { testPanicFromC(t) }
-func TestParallelSleep(t *testing.T) { testParallelSleep(t) }
func TestPrintf(t *testing.T) { testPrintf(t) }
func TestReturnAfterGrow(t *testing.T) { testReturnAfterGrow(t) }
func TestReturnAfterGrowFromGo(t *testing.T) { testReturnAfterGrowFromGo(t) }


@ -18,7 +18,6 @@ import (
"sync"
"sync/atomic"
"testing"
-"time"
"unsafe"
)
@ -30,8 +29,7 @@ extern void doAdd(int, int);
void IntoC(void);
// issue 1560
-// mysleep returns the absolute start time in ms.
-long long mysleep(int seconds);
+extern void Issue1560InC(void);
-// twoSleep returns the absolute start time of the first sleep
-// in ms.
@ -183,35 +181,40 @@ func test1328(t *testing.T) {
}
// issue 1560
+// Test that C functions and Go functions run in parallel.
-var sleepDone = make(chan int64)
-// parallelSleep returns the absolute difference between the start time
-// of the two sleeps.
-func parallelSleep(n int) int64 {
-t := int64(C.twoSleep(C.int(n))) - <-sleepDone
-if t < 0 {
-return -t
-}
-return t
-}
+var (
+issue1560 int32
+issue1560Ch = make(chan bool, 2)
+)
+//export Issue1560FromC
+func Issue1560FromC() {
+for atomic.LoadInt32(&issue1560) != 1 {
+runtime.Gosched()
+}
+atomic.AddInt32(&issue1560, 1)
+for atomic.LoadInt32(&issue1560) != 3 {
+runtime.Gosched()
+}
+issue1560Ch <- true
+}
-//export BackgroundSleep
-func BackgroundSleep(n int32) {
-go func() {
-sleepDone <- int64(C.mysleep(C.int(n)))
-}()
-}
+func Issue1560FromGo() {
+atomic.AddInt32(&issue1560, 1)
+for atomic.LoadInt32(&issue1560) != 2 {
+runtime.Gosched()
+}
+atomic.AddInt32(&issue1560, 1)
+issue1560Ch <- true
+}
-func testParallelSleep(t *testing.T) {
-sleepSec := 1
-dt := time.Duration(parallelSleep(sleepSec)) * time.Millisecond
-t.Logf("difference in start time for two sleep(%d) is %v", sleepSec, dt)
-// bug used to run sleeps in serial, producing a 2*sleepSec-second delay.
-// we detect if the start times of those sleeps are > 0.5*sleepSec-second.
-if dt >= time.Duration(sleepSec)*time.Second/2 {
-t.Fatalf("parallel %d-second sleeps slept for %f seconds", sleepSec, dt.Seconds())
-}
-}
+func test1560(t *testing.T) {
+go Issue1560FromGo()
+go C.Issue1560InC()
+<-issue1560Ch
+<-issue1560Ch
+}
// issue 2462


@ -96,7 +96,7 @@ if [ "$BOOTSTRAP_FORMAT" = "mintgz" ]; then
echo "Preparing to generate build system's ${OUTGZ}; cleaning ..."
rm -rf bin/gofmt
rm -rf src/runtime/race/race_*.syso
-rm -rf api test doc misc/cgo/test misc/trace
+rm -rf api test doc misc/cgo/test
rm -rf pkg/tool/*_*/{addr2line,api,cgo,cover,doc,fix,nm,objdump,pack,pprof,test2json,trace,vet}
rm -rf pkg/*_*/{image,database,cmd}
rm -rf $(find . -type d -name testdata)


@ -731,13 +731,28 @@ func (b *Writer) WriteRune(r rune) (size int, err error) {
// If the count is less than len(s), it also returns an error explaining
// why the write is short.
func (b *Writer) WriteString(s string) (int, error) {
+var sw io.StringWriter
+tryStringWriter := true
nn := 0
for len(s) > b.Available() && b.err == nil {
-n := copy(b.buf[b.n:], s)
-b.n += n
+var n int
+if b.Buffered() == 0 && sw == nil && tryStringWriter {
+// Check at most once whether b.wr is a StringWriter.
+sw, tryStringWriter = b.wr.(io.StringWriter)
+}
+if b.Buffered() == 0 && tryStringWriter {
+// Large write, empty buffer, and the underlying writer supports
+// WriteString: forward the write to the underlying StringWriter.
+// This avoids an extra copy.
+n, b.err = sw.WriteString(s)
+} else {
+n = copy(b.buf[b.n:], s)
+b.n += n
+b.Flush()
+}
nn += n
s = s[n:]
-b.Flush()
}
if b.err != nil {
return nn, b.err
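A small usage sketch of the new fast path; the buffer size is chosen only for illustration:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	var sb strings.Builder            // strings.Builder implements io.StringWriter
	w := bufio.NewWriterSize(&sb, 16) // deliberately small buffer
	// The string is longer than the (empty) buffer, so WriteString hands it
	// straight to sb.WriteString instead of copying it through w's buffer.
	w.WriteString("a string that is clearly longer than sixteen bytes")
	w.Flush()
	fmt.Println(sb.String())
}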


@ -762,6 +762,67 @@ func TestWriteString(t *testing.T) {
}
}
func TestWriteStringStringWriter(t *testing.T) {
const BufSize = 8
{
tw := &teststringwriter{}
b := NewWriterSize(tw, BufSize)
b.WriteString("1234")
tw.check(t, "", "")
b.WriteString("56789012") // longer than BufSize
tw.check(t, "12345678", "") // but not enough (after filling the partially-filled buffer)
b.Flush()
tw.check(t, "123456789012", "")
}
{
tw := &teststringwriter{}
b := NewWriterSize(tw, BufSize)
b.WriteString("123456789") // long string, empty buffer:
tw.check(t, "", "123456789") // use WriteString
}
{
tw := &teststringwriter{}
b := NewWriterSize(tw, BufSize)
b.WriteString("abc")
tw.check(t, "", "")
b.WriteString("123456789012345") // long string, non-empty buffer
tw.check(t, "abc12345", "6789012345") // use Write and then WriteString since the remaining part is still longer than BufSize
}
{
tw := &teststringwriter{}
b := NewWriterSize(tw, BufSize)
b.Write([]byte("abc")) // same as above, but use Write instead of WriteString
tw.check(t, "", "")
b.WriteString("123456789012345")
tw.check(t, "abc12345", "6789012345") // same as above
}
}
type teststringwriter struct {
write string
writeString string
}
func (w *teststringwriter) Write(b []byte) (int, error) {
w.write += string(b)
return len(b), nil
}
func (w *teststringwriter) WriteString(s string) (int, error) {
w.writeString += s
return len(s), nil
}
func (w *teststringwriter) check(t *testing.T, write, writeString string) {
t.Helper()
if w.write != write {
t.Errorf("write: expected %q, got %q", write, w.write)
}
if w.writeString != writeString {
t.Errorf("writeString: expected %q, got %q", writeString, w.writeString)
}
}
func TestBufferFull(t *testing.T) {
const longString = "And now, hello, world! It is the time for all good men to come to the aid of their party"
buf := NewReaderSize(strings.NewReader(longString), minReadBufferSize)


@ -33,7 +33,7 @@ Those that aren't guaranteed may change in future versions of Go (for
example, we've considered changing the alignment of int64 on 32-bit).
| Type | 64-bit | | 32-bit | |
-| --- | --- | --- | --- | --- |
+|-----------------------------|--------|-------|--------|-------|
| | Size | Align | Size | Align |
| bool, uint8, int8 | 1 | 1 | 1 | 1 |
| uint16, int16 | 2 | 2 | 2 | 2 |
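The table can be spot-checked with package unsafe; on a 64-bit platform this sketch prints 8 8 and 1 1:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var x struct {
		b bool
		i int64
	}
	fmt.Println(unsafe.Sizeof(x.i), unsafe.Alignof(x.i)) // 8 8 on 64-bit platforms
	fmt.Println(unsafe.Sizeof(x.b), unsafe.Alignof(x.b)) // 1 1
}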


@ -219,11 +219,13 @@ calling the function.
//go:uintptrescapes
The //go:uintptrescapes directive must be followed by a function declaration.
-It specifies that the function's uintptr arguments may be pointer values
-that have been converted to uintptr and must be treated as such by the
-garbage collector. The conversion from pointer to uintptr must appear in
-the argument list of any call to this function. This directive is necessary
-for some low-level system call implementations and should be avoided otherwise.
+It specifies that the function's uintptr arguments may be pointer values that
+have been converted to uintptr and must be on the heap and kept alive for the
+duration of the call, even though from the types alone it would appear that the
+object is no longer needed during the call. The conversion from pointer to
+uintptr must appear in the argument list of any call to this function. This
+directive is necessary for some low-level system call implementations and
+should be avoided otherwise.
//go:noinline
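A hypothetical sketch of the pattern the uintptrescapes directive supports; rawCall and buf are illustrative names, not a real API:

package p

import "unsafe"

//go:uintptrescapes
func rawCall(trap, a1 uintptr) (r uintptr) {
	// Low-level implementation elided in this sketch.
	return 0
}

type buf struct{ data [64]byte }

func send(b *buf) uintptr {
	// The pointer-to-uintptr conversion appears directly in the argument list,
	// so b is kept alive (and heap-allocated) for the duration of rawCall,
	// even though rawCall's signature only mentions uintptr.
	return rawCall(1, uintptr(unsafe.Pointer(b)))
}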


@ -0,0 +1,272 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package compare contains code for generating comparison
// routines for structs, strings and interfaces.
package compare
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"fmt"
"math/bits"
"sort"
)
// IsRegularMemory reports whether t can be compared/hashed as regular memory.
func IsRegularMemory(t *types.Type) bool {
a, _ := types.AlgType(t)
return a == types.AMEM
}
// Memrun finds runs of struct fields for which memory-only algs are appropriate.
// t is the parent struct type, and start is the field index at which to start the run.
// size is the length in bytes of the memory included in the run.
// next is the index just after the end of the memory run.
func Memrun(t *types.Type, start int) (size int64, next int) {
next = start
for {
next++
if next == t.NumFields() {
break
}
// Stop run after a padded field.
if types.IsPaddedField(t, next-1) {
break
}
// Also, stop before a blank or non-memory field.
if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
break
}
// For issue 46283, don't combine fields if the resulting load would
// require a larger alignment than the component fields.
if base.Ctxt.Arch.Alignment > 1 {
align := t.Alignment()
if off := t.Field(start).Offset; off&(align-1) != 0 {
// Offset is less aligned than the containing type.
// Use offset to determine alignment.
align = 1 << uint(bits.TrailingZeros64(uint64(off)))
}
size := t.Field(next).End() - t.Field(start).Offset
if size > align {
break
}
}
}
return t.Field(next-1).End() - t.Field(start).Offset, next
}
// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
func EqCanPanic(t *types.Type) bool {
switch t.Kind() {
default:
return false
case types.TINTER:
return true
case types.TARRAY:
return EqCanPanic(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
return true
}
}
return false
}
}
// EqStruct compares two structs np and nq for equality.
// It works by building a list of boolean conditions to satisfy.
// Conditions must be evaluated in the returned order and
// properly short circuited by the caller.
func EqStruct(t *types.Type, np, nq ir.Node) []ir.Node {
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
var conds [][]ir.Node
conds = append(conds, []ir.Node{})
and := func(n ir.Node) {
i := len(conds) - 1
conds[i] = append(conds[i], n)
}
// Walk the struct using memequal for runs of AMEM
// and calling specific equality tests for the others.
for i, fields := 0, t.FieldSlice(); i < len(fields); {
f := fields[i]
// Skip blank-named fields.
if f.Sym.IsBlank() {
i++
continue
}
// Compare non-memory fields with field equality.
if !IsRegularMemory(f.Type) {
if EqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []ir.Node{})
}
p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
eqlen, eqmem := EqString(p, q)
and(eqlen)
and(eqmem)
default:
and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
}
if EqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
conds = append(conds, []ir.Node{})
}
i++
continue
}
// Find maximal length run of memory-only fields.
size, next := Memrun(t, i)
// TODO(rsc): All the calls to newname are wrong for
// cross-package unexported fields.
if s := fields[i:next]; len(s) <= 2 {
// Two or fewer fields: use plain field equality.
for _, f := range s {
and(eqfield(np, nq, ir.OEQ, f.Sym))
}
} else {
// More than two fields: use memequal.
cc := eqmem(np, nq, f.Sym, size)
and(cc)
}
i = next
}
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
var flatConds []ir.Node
for _, c := range conds {
isCall := func(n ir.Node) bool {
return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
return !isCall(c[i]) && isCall(c[j])
})
flatConds = append(flatConds, c...)
}
return flatConds
}
// EqString returns the nodes
//
// len(s) == len(t)
//
// and
//
// memequal(s.ptr, t.ptr, len(s))
//
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
s = typecheck.Conv(s, types.Types[types.TSTRING])
t = typecheck.Conv(t, types.Types[types.TSTRING])
sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t)
slen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR])
tlen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR])
fn := typecheck.LookupRuntime("memequal")
fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(slen)}, false).(*ir.CallExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
// EqInterface returns the nodes
//
// s.tab == t.tab (or s.typ == t.typ, as appropriate)
//
// and
//
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
//
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
if !types.Identical(s.Type(), t.Type()) {
base.Fatalf("EqInterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
var fn ir.Node
if s.Type().IsEmptyInterface() {
fn = typecheck.LookupRuntime("efaceeq")
} else {
fn = typecheck.LookupRuntime("ifaceeq")
}
stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s)
ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t)
sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s)
tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t)
sdata.SetType(types.Types[types.TUNSAFEPTR])
tdata.SetType(types.Types[types.TUNSAFEPTR])
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
call := typecheck.Call(base.Pos, fn, []ir.Node{stab, sdata, tdata}, false).(*ir.CallExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
// eqfield returns the node
//
// p.field == q.field
func eqfield(p ir.Node, q ir.Node, op ir.Op, field *types.Sym) ir.Node {
nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
ne := ir.NewBinaryExpr(base.Pos, op, nx, ny)
return ne
}
// eqmem returns the node
//
// memequal(&p.field, &q.field [, size])
func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
fn, needsize := eqmemfunc(size, nx.Type().Elem())
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
call.Args.Append(nx)
call.Args.Append(ny)
if needsize {
call.Args.Append(ir.NewInt(size))
}
return call
}
func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
switch size {
default:
fn = typecheck.LookupRuntime("memequal")
needsize = true
case 1, 2, 4, 8, 16:
buf := fmt.Sprintf("memequal%d", int(size)*8)
fn = typecheck.LookupRuntime(buf)
}
fn = typecheck.SubstArgTypes(fn, t, t)
return fn, needsize
}


@ -422,8 +422,6 @@ func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
}
if fn.Pragma&ir.UintptrEscapes != 0 {
-fn.Pragma |= ir.UintptrKeepAlive
if f.Type.IsUintptr() {
if diagnose {
base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())


@ -120,6 +120,17 @@ func CanInline(fn *ir.Func) {
return
}
// If marked as "go:uintptrkeepalive", don't inline, since the
// keep alive information is lost during inlining.
//
// TODO(prattmic): This is handled on calls during escape analysis,
// which is after inlining. Move prior to inlining so the keep-alive is
// maintained after inlining.
if fn.Pragma&ir.UintptrKeepAlive != 0 {
reason = "marked as having a keep-alive uintptr argument"
return
}
// If marked as "go:uintptrescapes", don't inline, since the
// escape information is lost during inlining.
if fn.Pragma&ir.UintptrEscapes != 0 {


@ -459,7 +459,7 @@ const (
Noinline // func should not be inlined
NoCheckPtr // func should not be instrumented by checkptr
CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
-UintptrKeepAlive // pointers converted to uintptr must be kept alive (compiler internal only)
+UintptrKeepAlive // pointers converted to uintptr must be kept alive
UintptrEscapes // pointers converted to uintptr escape
// Runtime-only func pragmas.


@ -125,9 +125,27 @@ func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
}
}
-if decl.Body != nil && fn.Pragma&ir.Noescape != 0 {
+if decl.Body != nil {
+if fn.Pragma&ir.Noescape != 0 {
base.ErrorfAt(fn.Pos(), "can only use //go:noescape with external func implementations")
}
if (fn.Pragma&ir.UintptrKeepAlive != 0 && fn.Pragma&ir.UintptrEscapes == 0) && fn.Pragma&ir.Nosplit == 0 {
// Stack growth can't handle uintptr arguments that may
// be pointers (as we don't know which are pointers
// when creating the stack map). Thus uintptrkeepalive
// functions (and all transitive callees) must be
// nosplit.
//
// N.B. uintptrescapes implies uintptrkeepalive but it
// is OK since the arguments must escape to the heap.
//
// TODO(prattmic): Add recursive nosplit check of callees.
// TODO(prattmic): Functions with no body (i.e.,
// assembly) must also be nosplit, but we can't check
// that here.
base.ErrorfAt(fn.Pos(), "go:uintptrkeepalive requires go:nosplit")
}
}
if decl.Name.Value == "init" && decl.Recv == nil {
g.target.Inits = append(g.target.Inits, fn)
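A sketch of what the check above enforces; the directive is restricted to the standard library, and the names here are illustrative:

package p

// A uintptrkeepalive function without uintptrescapes must also be nosplit,
// because stack growth cannot tell which uintptr arguments are really pointers.

//go:uintptrkeepalive
//go:nosplit
func keepAliveCall(fn, arg uintptr) uintptr {
	// Implementation elided in this sketch.
	return 0
}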


@ -30,6 +30,7 @@ const (
ir.NoCheckPtr |
ir.RegisterParams | // TODO(register args) remove after register abi is working
ir.CgoUnsafeArgs |
+ir.UintptrKeepAlive |
ir.UintptrEscapes |
ir.Systemstack |
ir.Nowritebarrier |
@ -67,19 +68,13 @@ func pragmaFlag(verb string) ir.PragmaFlag {
return ir.Yeswritebarrierrec
case "go:cgo_unsafe_args":
return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968)
+case "go:uintptrkeepalive":
+return ir.UintptrKeepAlive
case "go:uintptrescapes":
-// For the next function declared in the file
-// any uintptr arguments may be pointer values
-// converted to uintptr. This directive
-// ensures that the referenced allocated
-// object, if any, is retained and not moved
-// until the call completes, even though from
-// the types alone it would appear that the
-// object is no longer needed during the
-// call. The conversion to uintptr must appear
-// in the argument list.
-// Used in syscall/dll_windows.go.
-return ir.UintptrEscapes
+// This directive extends //go:uintptrkeepalive by forcing
+// uintptr arguments to escape to the heap, which makes stack
+// growth safe.
+return ir.UintptrEscapes | ir.UintptrKeepAlive // implies UintptrKeepAlive
case "go:registerparams": // TODO(register args) remove after register abi is working
return ir.RegisterParams
case "go:notinheap":


@ -340,6 +340,9 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 {
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)})
}
if flag == ir.UintptrKeepAlive && !base.Flag.Std {
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is only allowed in the standard library", verb)})
}
if flag == 0 && !allowedStdPragmas[verb] && base.Flag.Std {
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
}


@ -742,6 +742,22 @@ func (w *writer) funcExt(obj *types2.Func) {
if pragma&ir.Noescape != 0 {
w.p.errorf(decl, "can only use //go:noescape with external func implementations")
}
if (pragma&ir.UintptrKeepAlive != 0 && pragma&ir.UintptrEscapes == 0) && pragma&ir.Nosplit == 0 {
// Stack growth can't handle uintptr arguments that may
// be pointers (as we don't know which are pointers
// when creating the stack map). Thus uintptrkeepalive
// functions (and all transitive callees) must be
// nosplit.
//
// N.B. uintptrescapes implies uintptrkeepalive but it
// is OK since the arguments must escape to the heap.
//
// TODO(prattmic): Add recursive nosplit check of callees.
// TODO(prattmic): Functions with no body (i.e.,
// assembly) must also be nosplit, but we can't check
// that here.
w.p.errorf(decl, "go:uintptrkeepalive requires go:nosplit")
}
} else {
if base.Flag.Complete || decl.Name.Value == "init" {
// Linknamed functions are allowed to have no body. Hopefully


@ -6,10 +6,9 @@ package reflectdata
import (
"fmt"
-"math/bits"
-"sort"
"cmd/compile/internal/base"
+"cmd/compile/internal/compare"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/typecheck"
@ -17,32 +16,6 @@ import (
"cmd/internal/obj"
)
// isRegularMemory reports whether t can be compared/hashed as regular memory.
func isRegularMemory(t *types.Type) bool {
a, _ := types.AlgType(t)
return a == types.AMEM
}
// eqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
func eqCanPanic(t *types.Type) bool {
switch t.Kind() {
default:
return false
case types.TINTER:
return true
case types.TARRAY:
return eqCanPanic(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if !f.Sym.IsBlank() && eqCanPanic(f.Type) {
return true
}
}
return false
}
}
// AlgType returns the fixed-width AMEMxx variants instead of the general
// AMEM kind when possible.
func AlgType(t *types.Type) types.AlgKind {
@ -206,7 +179,7 @@ func genhash(t *types.Type) *obj.LSym {
}
// Hash non-memory fields with appropriate hash function.
-if !isRegularMemory(f.Type) {
+if !compare.IsRegularMemory(f.Type) {
hashel := hashfor(f.Type)
call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
@ -219,7 +192,7 @@ func genhash(t *types.Type) *obj.LSym {
}
// Otherwise, hash a maximal length run of raw memory.
-size, next := memrun(t, i)
+size, next := compare.Memrun(t, i)
// h = hashel(&p.first, size, h)
hashel := hashmem(f.Type)
@ -510,12 +483,12 @@ func geneq(t *types.Type) *obj.LSym {
// Second, check that all the contents match (expensive).
checkAll(3, false, func(pi, qi ir.Node) ir.Node {
// Compare lengths.
-eqlen, _ := EqString(pi, qi)
+eqlen, _ := compare.EqString(pi, qi)
return eqlen
})
checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// Compare contents.
-_, eqmem := EqString(pi, qi)
+_, eqmem := compare.EqString(pi, qi)
return eqmem
})
case types.TFLOAT32, types.TFLOAT64:
@ -532,81 +505,7 @@ func geneq(t *types.Type) *obj.LSym {
}
case types.TSTRUCT:
-// Build a list of conditions to satisfy.
+flatConds := compare.EqStruct(t, np, nq)
// The conditions are a list-of-lists. Conditions are reorderable
// within each inner list. The outer lists must be evaluated in order.
var conds [][]ir.Node
conds = append(conds, []ir.Node{})
and := func(n ir.Node) {
i := len(conds) - 1
conds[i] = append(conds[i], n)
}
// Walk the struct using memequal for runs of AMEM
// and calling specific equality tests for the others.
for i, fields := 0, t.FieldSlice(); i < len(fields); {
f := fields[i]
// Skip blank-named fields.
if f.Sym.IsBlank() {
i++
continue
}
// Compare non-memory fields with field equality.
if !isRegularMemory(f.Type) {
if eqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []ir.Node{})
}
p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
eqlen, eqmem := EqString(p, q)
and(eqlen)
and(eqmem)
default:
and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
}
if eqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
conds = append(conds, []ir.Node{})
}
i++
continue
}
// Find maximal length run of memory-only fields.
size, next := memrun(t, i)
// TODO(rsc): All the calls to newname are wrong for
// cross-package unexported fields.
if s := fields[i:next]; len(s) <= 2 {
// Two or fewer fields: use plain field equality.
for _, f := range s {
and(eqfield(np, nq, f.Sym))
}
} else {
// More than two fields: use memequal.
and(eqmem(np, nq, f.Sym, size))
}
i = next
}
// Sort conditions to put runtime calls last.
// Preserve the rest of the ordering.
var flatConds []ir.Node
for _, c := range conds {
isCall := func(n ir.Node) bool {
return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
}
sort.SliceStable(c, func(i, j int) bool {
return !isCall(c[i]) && isCall(c[j])
})
flatConds = append(flatConds, c...)
}
if len(flatConds) == 0 {
fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true)))
} else {
@ -631,7 +530,7 @@ func geneq(t *types.Type) *obj.LSym {
// return (or goto ret)
fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(false)))
-if eqCanPanic(t) || anyCall(fn) {
+if compare.EqCanPanic(t) || anyCall(fn) {
// Epilogue is large, so share it with the equal case.
fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
} else {
@ -680,153 +579,6 @@ func anyCall(fn *ir.Func) bool {
})
}
// eqfield returns the node
//
// p.field == q.field
func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
ne := ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny)
return ne
}
// EqString returns the nodes
//
// len(s) == len(t)
//
// and
//
// memequal(s.ptr, t.ptr, len(s))
//
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
s = typecheck.Conv(s, types.Types[types.TSTRING])
t = typecheck.Conv(t, types.Types[types.TSTRING])
sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t)
slen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR])
tlen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR])
fn := typecheck.LookupRuntime("memequal")
fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(slen)}, false).(*ir.CallExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
// EqInterface returns the nodes
//
// s.tab == t.tab (or s.typ == t.typ, as appropriate)
//
// and
//
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
//
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
if !types.Identical(s.Type(), t.Type()) {
base.Fatalf("EqInterface %v %v", s.Type(), t.Type())
}
// func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
// func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
var fn ir.Node
if s.Type().IsEmptyInterface() {
fn = typecheck.LookupRuntime("efaceeq")
} else {
fn = typecheck.LookupRuntime("ifaceeq")
}
stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s)
ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t)
sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s)
tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t)
sdata.SetType(types.Types[types.TUNSAFEPTR])
tdata.SetType(types.Types[types.TUNSAFEPTR])
sdata.SetTypecheck(1)
tdata.SetTypecheck(1)
call := typecheck.Call(base.Pos, fn, []ir.Node{stab, sdata, tdata}, false).(*ir.CallExpr)
cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
cmp.SetType(types.Types[types.TBOOL])
return cmp, call
}
// eqmem returns the node
//
// memequal(&p.field, &q.field [, size])
func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
fn, needsize := eqmemfunc(size, nx.Type().Elem())
call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
call.Args.Append(nx)
call.Args.Append(ny)
if needsize {
call.Args.Append(ir.NewInt(size))
}
return call
}
func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
switch size {
default:
fn = typecheck.LookupRuntime("memequal")
needsize = true
case 1, 2, 4, 8, 16:
buf := fmt.Sprintf("memequal%d", int(size)*8)
fn = typecheck.LookupRuntime(buf)
}
fn = typecheck.SubstArgTypes(fn, t, t)
return fn, needsize
}
// memrun finds runs of struct fields for which memory-only algs are appropriate.
// t is the parent struct type, and start is the field index at which to start the run.
// size is the length in bytes of the memory included in the run.
// next is the index just after the end of the memory run.
func memrun(t *types.Type, start int) (size int64, next int) {
next = start
for {
next++
if next == t.NumFields() {
break
}
// Stop run after a padded field.
if types.IsPaddedField(t, next-1) {
break
}
// Also, stop before a blank or non-memory field.
if f := t.Field(next); f.Sym.IsBlank() || !isRegularMemory(f.Type) {
break
}
// For issue 46283, don't combine fields if the resulting load would
// require a larger alignment than the component fields.
if base.Ctxt.Arch.Alignment > 1 {
align := t.Alignment()
if off := t.Field(start).Offset; off&(align-1) != 0 {
// Offset is less aligned than the containing type.
// Use offset to determine alignment.
align = 1 << uint(bits.TrailingZeros64(uint64(off)))
}
size := t.Field(next).End() - t.Field(start).Offset
if size > align {
break
}
}
}
return t.Field(next-1).End() - t.Field(start).Offset, next
}
func hashmem(t *types.Type) ir.Node {
sym := ir.Pkgs.Runtime.Lookup("memhash")


@ -14,6 +14,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/bitvec"
+"cmd/compile/internal/compare"
"cmd/compile/internal/escape"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
@ -728,7 +729,7 @@ func dcommontype(lsym *obj.LSym, t *types.Type) int {
if t.Sym() != nil && t.Sym().Name != "" {
tflag |= tflagNamed
}
-if isRegularMemory(t) {
+if compare.IsRegularMemory(t) {
tflag |= tflagRegularMemory
}


@ -827,7 +827,7 @@ func (p *parser) unaryExpr() Expr {
switch p.tok {
case _Operator, _Star:
switch p.op {
-case Mul, Add, Sub, Not, Xor:
+case Mul, Add, Sub, Not, Xor, Tilde:
x := new(Operation)
x.pos = p.pos()
x.Op = p.op
@ -991,7 +991,7 @@ func (p *parser) operand(keep_parens bool) Expr {
case _Func:
pos := p.pos()
p.next()
-_, ftyp := p.funcType("function literal")
+_, ftyp := p.funcType("function type")
if p.tok == _Lbrace {
p.xnest++
@ -1499,45 +1499,15 @@ func (p *parser) interfaceType() *InterfaceType {
p.want(_Interface)
p.want(_Lbrace)
p.list("interface type", _Semi, _Rbrace, func() bool {
-switch p.tok {
-case _Name:
-f := p.methodDecl()
-if f.Name == nil {
+var f *Field
+if p.tok == _Name {
+f = p.methodDecl()
+}
+if f == nil || f.Name == nil {
f = p.embeddedElem(f)
}
typ.MethodList = append(typ.MethodList, f)
return false
case _Lparen:
p.syntaxError("cannot parenthesize embedded type")
f := new(Field)
f.pos = p.pos()
p.next()
f.Type = p.qualifiedName(nil)
p.want(_Rparen)
typ.MethodList = append(typ.MethodList, f)
return false
case _Operator:
if p.op == Tilde {
typ.MethodList = append(typ.MethodList, p.embeddedElem(nil))
return false
}
default:
pos := p.pos()
if t := p.typeOrNil(); t != nil {
f := new(Field)
f.pos = pos
f.Type = t
typ.MethodList = append(typ.MethodList, p.embeddedElem(f))
return false
}
}
p.syntaxError("expecting method or embedded element")
p.advance(_Semi, _Rbrace)
return false
})
return typ


@ -8,7 +8,8 @@ type _ func /* ERROR function type must have no type parameters */ [ /* ERROR em
type _ func /* ERROR function type must have no type parameters */ [ x /* ERROR missing type constraint */ ]()
type _ func /* ERROR function type must have no type parameters */ [P any]()
-var _ = func /* ERROR function literal must have no type parameters */ [P any]() {}
+var _ = (func /* ERROR function type must have no type parameters */ [P any]())(nil)
+var _ = func /* ERROR function type must have no type parameters */ [P any]() {}
type _ interface{
m /* ERROR interface method must have no type parameters */ [P any]()


@ -0,0 +1,17 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p
type _ interface {
int
(int)
(*int)
*([]byte)
~(int)
(int) | (string)
(int) | ~(string)
(/* ERROR unexpected ~ */ ~int)
(int /* ERROR unexpected \| */ | /* ERROR unexpected string */ string /* ERROR unexpected \) */ )
}


@ -65,15 +65,17 @@ func _[_ t[t] | t[t]]() {}
// Single-expression type parameter lists and those that don't start
// with a (type parameter) name are considered array sizes.
-// The term must be a valid expression (it could be a type - and then
-// a type-checker will complain - but we don't allow ~ in the expr).
+// The term must be a valid expression (it could be a type incl. a
+// tilde term) but the type-checker will complain.
type (
_[t] t
-_[/* ERROR unexpected ~ */ ~t] t
_[t|t] t
-_[/* ERROR unexpected ~ */ ~t|t] t
-_[t| /* ERROR unexpected ~ */ ~t] t
-_[/* ERROR unexpected ~ */ ~t|~t] t
+// These are invalid and the type-checker will complain.
+_[~t] t
+_[~t|t] t
+_[t|~t] t
+_[~t|~t] t
)
type (


@ -128,15 +128,33 @@ func TestIntendedInlining(t *testing.T) {
"ValidRune",
},
"reflect": {
-"Value.CanInt",
-"Value.CanUint",
-"Value.CanFloat",
-"Value.CanComplex",
+"Value.Bool",
+"Value.Bytes",
"Value.CanAddr",
-"Value.CanSet",
+"Value.CanComplex",
+"Value.CanFloat",
+"Value.CanInt",
"Value.CanInterface",
+"Value.CanSet",
+"Value.CanUint",
+"Value.Cap",
+"Value.Complex",
+"Value.Float",
+"Value.Int",
+"Value.Interface",
+"Value.IsNil",
"Value.IsValid",
+"Value.Kind",
+"Value.Len",
"Value.MapRange",
+"Value.OverflowComplex",
+"Value.OverflowFloat",
+"Value.OverflowInt",
+"Value.OverflowUint",
+"Value.String",
+"Value.Type",
+"Value.Uint",
+"Value.UnsafeAddr",
"Value.pointer",
"add",
"align",


@ -735,7 +735,7 @@ func (check *Checker) declStmt(list []syntax.Decl) {
top := len(check.delayed)
// iota is the index of the current constDecl within the group
-if first < 0 || list[index-1].(*syntax.ConstDecl).Group != s.Group {
+if first < 0 || s.Group == nil || list[index-1].(*syntax.ConstDecl).Group != s.Group {
first = index
last = nil
}


@ -89,21 +89,11 @@ func (check *Checker) op(m opPredicates, x *operand, op syntax.Operator) bool {
func (check *Checker) overflow(x *operand) {
assert(x.mode == constant_)
// If the corresponding expression is an operation, use the
// operator position rather than the start of the expression
// as error position.
pos := syntax.StartPos(x.expr)
what := "" // operator description, if any
if op, _ := x.expr.(*syntax.Operation); op != nil {
pos = op.Pos()
what = opName(op)
}
if x.val.Kind() == constant.Unknown {
// TODO(gri) We should report exactly what went wrong. At the
// moment we don't have the (go/constant) API for that.
// See also TODO in go/constant/value.go.
-check.error(pos, "constant result is not representable")
+check.error(opPos(x.expr), "constant result is not representable")
return
}
@ -119,14 +109,28 @@ func (check *Checker) overflow(x *operand) {
// Untyped integer values must not grow arbitrarily.
const prec = 512 // 512 is the constant precision
if x.val.Kind() == constant.Int && constant.BitLen(x.val) > prec {
-check.errorf(pos, "constant %s overflow", what)
+check.errorf(opPos(x.expr), "constant %s overflow", opName(x.expr))
x.val = constant.MakeUnknown()
}
}
-// opName returns the name of an operation, or the empty string.
-// Only operations that might overflow are handled.
-func opName(e *syntax.Operation) string {
+// opPos returns the position of the operator if x is an operation;
+// otherwise it returns the start position of x.
+func opPos(x syntax.Expr) syntax.Pos {
switch op := x.(type) {
case nil:
return nopos // don't crash
case *syntax.Operation:
return op.Pos()
default:
return syntax.StartPos(x)
}
}
// opName returns the name of the operation if x is an operation
// that might overflow; otherwise it returns the empty string.
func opName(x syntax.Expr) string {
if e, _ := x.(*syntax.Operation); e != nil {
op := int(e.Op)
if e.Y == nil {
if op < len(op2str1) {
@ -137,6 +141,7 @@ func opName(e *syntax.Operation) string {
return op2str2[op]
}
}
+}
return ""
}
@ -203,6 +208,12 @@ func (check *Checker) unary(x *operand, e *syntax.Operation) {
x.typ = ch.elem
check.hasCallOrRecv = true
return
case syntax.Tilde:
// Provide a better error position and message than what check.op below could do.
check.error(e, "cannot use ~ outside of interface or type constraint")
x.mode = invalid
return
}
if !check.op(unaryOpPredicates, x, e.Op) {
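For contrast, a small sketch of where ~ is and is not accepted:

package p

// Valid: a tilde term as an interface element (type constraint).
type Ints interface{ ~int }

func double[T Ints](x T) T { return x + x }

// Invalid: ~ used as a unary expression operator; the checker now reports
// "cannot use ~ outside of interface or type constraint".
// var _ = ~1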


@ -340,7 +340,7 @@ func (check *Checker) collectObjects() {
case *syntax.ConstDecl:
// iota is the index of the current constDecl within the group
-if first < 0 || file.DeclList[index-1].(*syntax.ConstDecl).Group != s.Group {
+if first < 0 || s.Group == nil || file.DeclList[index-1].(*syntax.ConstDecl).Group != s.Group {
first = index
last = nil
}


@ -166,10 +166,11 @@ func (s *StdSizes) Sizeof(T Type) int64 {
// common architecture word sizes and alignments
var gcArchSizes = map[string]*StdSizes{
"386": {4, 4},
-"arm": {4, 4},
-"arm64": {8, 8},
"amd64": {8, 8},
"amd64p32": {4, 8},
+"arm": {4, 4},
+"arm64": {8, 8},
+"loong64": {8, 8},
"mips": {4, 4},
"mipsle": {4, 4},
"mips64": {8, 8},
@ -188,7 +189,7 @@ var gcArchSizes = map[string]*StdSizes{
// The result is nil if a compiler/architecture pair is not known.
//
// Supported architectures for compiler "gc":
-// "386", "arm", "arm64", "amd64", "amd64p32", "mips", "mipsle",
+// "386", "amd64", "amd64p32", "arm", "arm64", "loong64", "mips", "mipsle",
// "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm".
func SizesFor(compiler, arch string) Sizes {
var m map[string]*StdSizes


@ -349,6 +349,25 @@ const _ = unsafe.Sizeof(func() {
assert(iota == 0)
})
// issue #52438
const i1 = iota
const i2 = iota
const i3 = iota
func _() {
assert(i1 == 0)
assert(i2 == 0)
assert(i3 == 0)
const i4 = iota
const i5 = iota
const i6 = iota
assert(i4 == 0)
assert(i5 == 0)
assert(i6 == 0)
}
// untyped constants must not get arbitrarily large
const prec = 512 // internal maximum precision for integers
const maxInt = (1<<(prec/2) - 1) * (1<<(prec/2) + 1) // == 1<<prec - 1


@ -178,3 +178,10 @@ func _() {
_ = -g /* ERROR 2-valued g */ ()
_ = <-g /* ERROR 2-valued g */ ()
}
// ~ is accepted as unary operator only permitted in interface type elements
var (
_ = ~ /* ERROR cannot use ~ outside of interface or type constraint */ 0
_ = ~ /* ERROR cannot use ~ outside of interface or type constraint */ "foo"
_ = ~ /* ERROR cannot use ~ outside of interface or type constraint */ i0
)


@ -22,4 +22,4 @@ type _[P /* ERROR non-function P */ (*int)] int
type _[P *struct /* ERROR "not an expression" */ {}| int /* ERROR "not an expression" */ ] struct{}
// The following fails to parse, due to the '~'
-type _[P *struct /* ERROR "not an expression" */ {}|~ /* ERROR "unexpected ~" */ int] struct{}
+type _[P *struct /* ERROR "not an expression" */ {}|~int /* ERROR "not an expression" */ ] struct{}


@ -0,0 +1,11 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p
func _() {
const x = 0
x /* ERROR cannot assign to x */ += 1
x /* ERROR cannot assign to x */ ++
}


@ -8,6 +8,7 @@ import (
"go/constant"
"cmd/compile/internal/base"
+"cmd/compile/internal/compare"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
@ -178,7 +179,7 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
andor = ir.OOROR
}
var expr ir.Node
-compare := func(el, er ir.Node) {
+comp := func(el, er ir.Node) {
a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
if expr == nil {
expr = a
@ -186,18 +187,26 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
}
}
and := func(cond ir.Node) {
if expr == nil {
expr = cond
} else {
expr = ir.NewLogicalExpr(base.Pos, andor, expr, cond)
}
}
cmpl = safeExpr(cmpl, init)
cmpr = safeExpr(cmpr, init)
if t.IsStruct() {
-for _, f := range t.Fields().Slice() {
-sym := f.Sym
-if sym.IsBlank() {
-continue
-}
-compare(
-ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpl, sym),
-ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpr, sym),
-)
+conds := compare.EqStruct(t, cmpl, cmpr)
+if n.Op() == ir.OEQ {
+for _, cond := range conds {
+and(cond)
+}
+} else {
+for _, cond := range conds {
+notCond := ir.NewUnaryExpr(base.Pos, ir.ONOT, cond)
+and(notCond)
+}
+}
}
} else {
step := int64(1) step := int64(1)
@ -221,7 +230,7 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
step = 1
}
if step == 1 {
-compare(
+comp(
ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)),
ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)),
)
@ -249,7 +258,7 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Size()*offset)) rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Size()*offset))
cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb) cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb)
} }
compare(cmplw, cmprw) comp(cmplw, cmprw)
i += step i += step
remains -= step * t.Elem().Size() remains -= step * t.Elem().Size()
} }
@ -270,7 +279,7 @@ func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
n.Y = cheapExpr(n.Y, init) n.Y = cheapExpr(n.Y, init)
n.X = cheapExpr(n.X, init) n.X = cheapExpr(n.X, init)
eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y) eqtab, eqdata := compare.EqInterface(n.X, n.Y)
var cmp ir.Node var cmp ir.Node
if n.Op() == ir.OEQ { if n.Op() == ir.OEQ {
cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata) cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
@ -384,7 +393,7 @@ func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
// prepare for rewrite below // prepare for rewrite below
n.X = cheapExpr(n.X, init) n.X = cheapExpr(n.X, init)
n.Y = cheapExpr(n.Y, init) n.Y = cheapExpr(n.Y, init)
eqlen, eqmem := reflectdata.EqString(n.X, n.Y) eqlen, eqmem := compare.EqString(n.X, n.Y)
// quick check of len before full compare for == or !=. // quick check of len before full compare for == or !=.
// memequal then tests equality up to length len. // memequal then tests equality up to length len.
if n.Op() == ir.OEQ { if n.Op() == ir.OEQ {
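As a rough illustration of what this lowering computes (an editor's sketch, not code from the change): for a comparable struct, == becomes a conjunction of field comparisons and != a disjunction of negated ones, matching the OANDAND/OOROR choice above.

```go
// Semantically, for type pair struct{ X int; Y string }:
type pair struct {
	X int
	Y string
}

func eq(a, b pair) bool  { return a.X == b.X && a.Y == b.Y } // what a == b lowers to
func neq(a, b pair) bool { return a.X != b.X || a.Y != b.Y } // what a != b lowers to
```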

src/cmd/dist/test.go
View file

@ -556,6 +556,55 @@ func (t *tester) registerTests() {
}) })
} }
// morestack tests. We only run these in long-test mode
// (with GO_TEST_SHORT=false) because the runtime test is
// already quite long and mayMoreStackMove makes it about
// twice as slow.
if !t.compileOnly && short() == "false" {
// hooks is the set of maymorestack hooks to test with.
hooks := []string{"mayMoreStackPreempt", "mayMoreStackMove"}
// pkgs is the set of test packages to run.
pkgs := []string{"runtime", "reflect", "sync"}
// hookPkgs is the set of package patterns to apply
// the maymorestack hook to.
hookPkgs := []string{"runtime/...", "reflect", "sync"}
// unhookPkgs is the set of package patterns to
// exclude from hookPkgs.
unhookPkgs := []string{"runtime/testdata/..."}
for _, hook := range hooks {
// Construct the build flags to use the
// maymorestack hook in the compiler and
// assembler. We pass this via the GOFLAGS
// environment variable so that it applies to
// both the test itself and to binaries built
// by the test.
goFlagsList := []string{}
for _, flag := range []string{"-gcflags", "-asmflags"} {
for _, hookPkg := range hookPkgs {
goFlagsList = append(goFlagsList, flag+"="+hookPkg+"=-d=maymorestack=runtime."+hook)
}
for _, unhookPkg := range unhookPkgs {
goFlagsList = append(goFlagsList, flag+"="+unhookPkg+"=")
}
}
goFlags := strings.Join(goFlagsList, " ")
for _, pkg := range pkgs {
pkg := pkg
testName := hook + ":" + pkg
t.tests = append(t.tests, distTest{
name: testName,
heading: "maymorestack=" + hook,
fn: func(dt *distTest) error {
cmd := t.addCmd(dt, "src", t.goTest(), t.timeout(600), pkg, "-short")
setEnv(cmd, "GOFLAGS", goFlags)
return nil
},
})
}
}
}
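For reference, an editor's reconstruction (not part of the change) of the GOFLAGS value the loop above builds for hook = "mayMoreStackPreempt":

```go
// Line-wrapped for readability; the actual value is a single space-joined string.
const exampleGoFlags = "-gcflags=runtime/...=-d=maymorestack=runtime.mayMoreStackPreempt " +
	"-gcflags=reflect=-d=maymorestack=runtime.mayMoreStackPreempt " +
	"-gcflags=sync=-d=maymorestack=runtime.mayMoreStackPreempt " +
	"-gcflags=runtime/testdata/...= " +
	"-asmflags=runtime/...=-d=maymorestack=runtime.mayMoreStackPreempt " +
	"-asmflags=reflect=-d=maymorestack=runtime.mayMoreStackPreempt " +
	"-asmflags=sync=-d=maymorestack=runtime.mayMoreStackPreempt " +
	"-asmflags=runtime/testdata/...="
```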
// This test needs its stdout/stderr to be terminals, so we don't run it from cmd/go's tests. // This test needs its stdout/stderr to be terminals, so we don't run it from cmd/go's tests.
// See issue 18153. // See issue 18153.
if goos == "linux" { if goos == "linux" {

View file

@ -4,12 +4,20 @@
package base package base
import (
"fmt"
"path/filepath"
)
// AppendPWD returns the result of appending PWD=dir to the environment base. // AppendPWD returns the result of appending PWD=dir to the environment base.
// //
// The resulting environment makes os.Getwd more efficient for a subprocess // The resulting environment makes os.Getwd more efficient for a subprocess
// running in dir. // running in dir.
func AppendPWD(base []string, dir string) []string { func AppendPWD(base []string, dir string) []string {
// Internally we only use absolute paths, so dir is absolute. // POSIX requires PWD to be absolute.
// Even if dir is not absolute, no harm done. // Internally we only use absolute paths, so dir should already be absolute.
if !filepath.IsAbs(dir) {
panic(fmt.Sprintf("AppendPWD with relative path %q", dir))
}
return append(base, "PWD="+dir) return append(base, "PWD="+dir)
} }
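As background for the absolute-path requirement (an editor's sketch, not code from this change), a caller that wants the same os.Getwd speedup for an arbitrary subprocess can pin PWD alongside Dir; the helper name, the "/tmp" path, and the "pwd" command are illustrative:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// runIn runs name with args in dir, with PWD pinned to dir so that
// os.Getwd in the child can answer from the environment.
func runIn(dir, name string, args ...string) error {
	if !filepath.IsAbs(dir) {
		return fmt.Errorf("runIn: dir %q is not absolute", dir)
	}
	cmd := exec.Command(name, args...)
	cmd.Dir = dir
	cmd.Env = append(os.Environ(), "PWD="+dir)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if err := runIn("/tmp", "pwd"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```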

View file

@ -5,7 +5,9 @@
package generate package generate
import ( import (
"internal/testenv"
"os" "os"
"path/filepath"
"reflect" "reflect"
"runtime" "runtime"
"testing" "testing"
@ -41,10 +43,11 @@ var splitTests = []splitTest{
} }
func TestGenerateCommandParse(t *testing.T) { func TestGenerateCommandParse(t *testing.T) {
dir := filepath.Join(testenv.GOROOT(t), "src", "sys")
g := &Generator{ g := &Generator{
r: nil, // Unused here. r: nil, // Unused here.
path: "/usr/ken/sys/proc.go", path: filepath.Join(dir, "proc.go"),
dir: "/usr/ken/sys", dir: dir,
file: "proc.go", file: "proc.go",
pkg: "sys", pkg: "sys",
commands: make(map[string][]string), commands: make(map[string][]string),
@ -84,10 +87,11 @@ var defEnvMap = map[string]string{
// before executing the test. i.e., execute the split as if it // before executing the test. i.e., execute the split as if it
// processing that source line. // processing that source line.
func TestGenerateCommandShorthand(t *testing.T) { func TestGenerateCommandShorthand(t *testing.T) {
dir := filepath.Join(testenv.GOROOT(t), "src", "sys")
g := &Generator{ g := &Generator{
r: nil, // Unused here. r: nil, // Unused here.
path: "/usr/ken/sys/proc.go", path: filepath.Join(dir, "proc.go"),
dir: "/usr/ken/sys", dir: dir,
file: "proc.go", file: "proc.go",
pkg: "sys", pkg: "sys",
commands: make(map[string][]string), commands: make(map[string][]string),
@ -222,10 +226,11 @@ var splitTestsLines = []splitTestWithLine{
// before executing the test. i.e., execute the split as if it // before executing the test. i.e., execute the split as if it
// processing that source line. // processing that source line.
func TestGenerateCommandShortHand2(t *testing.T) { func TestGenerateCommandShortHand2(t *testing.T) {
dir := filepath.Join(testenv.GOROOT(t), "src", "sys")
g := &Generator{ g := &Generator{
r: nil, // Unused here. r: nil, // Unused here.
path: "/usr/ken/sys/proc.go", path: filepath.Join(dir, "proc.go"),
dir: "/usr/ken/sys", dir: dir,
file: "proc.go", file: "proc.go",
pkg: "sys", pkg: "sys",
commands: make(map[string][]string), commands: make(map[string][]string),

View file

@ -568,6 +568,13 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
IgnoreImports: *listFind, IgnoreImports: *listFind,
ModResolveTests: *listTest, ModResolveTests: *listTest,
LoadVCS: true, LoadVCS: true,
// SuppressDeps is set if the user explicitly asks for the json fields they
// need and does not ask for Deps or DepsErrors. It's not set when using a template string,
// even if *listFmt doesn't contain .Deps, because Deps are used to find import cycles
// for test variants of packages, and users who have been providing format strings
// might not expect those errors to stop showing up.
// See issue #52443.
SuppressDeps: !listJsonFields.needAny("Deps", "DepsErrors"),
} }
pkgs := load.PackagesAndErrors(ctx, pkgOpts, args) pkgs := load.PackagesAndErrors(ctx, pkgOpts, args)
if !*listE { if !*listE {

View file

@ -1944,7 +1944,9 @@ func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *
} }
} }
p.Internal.Imports = imports p.Internal.Imports = imports
if !opts.SuppressDeps {
p.collectDeps() p.collectDeps()
}
if p.Error == nil && p.Name == "main" && !p.Internal.ForceLibrary && len(p.DepsErrors) == 0 { if p.Error == nil && p.Name == "main" && !p.Internal.ForceLibrary && len(p.DepsErrors) == 0 {
// TODO(bcmills): loading VCS metadata can be fairly slow. // TODO(bcmills): loading VCS metadata can be fairly slow.
// Consider starting this as a background goroutine and retrieving the result // Consider starting this as a background goroutine and retrieving the result
@ -2685,6 +2687,12 @@ type PackageOpts struct {
// LoadVCS controls whether we also load version-control metadata for main packages. // LoadVCS controls whether we also load version-control metadata for main packages.
LoadVCS bool LoadVCS bool
// SuppressDeps is true if the caller does not need Deps and DepsErrors to be populated
// on the package. TestPackagesAndErrors examines the Deps field to determine if the test
// variant has an import cycle, so SuppressDeps should not be set if TestPackagesAndErrors
// will be called on the package.
SuppressDeps bool
} }
// PackagesAndErrors returns the packages named by the command line arguments // PackagesAndErrors returns the packages named by the command line arguments

View file

@ -605,11 +605,13 @@ func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (str
pkg := pathInModuleCache(ctx, absDir, rs) pkg := pathInModuleCache(ctx, absDir, rs)
if pkg == "" { if pkg == "" {
scope := "main module or its selected dependencies"
if inWorkspaceMode() { if inWorkspaceMode() {
scope = "modules listed in go.work or their selected dependencies" if mr := findModuleRoot(absDir); mr != "" {
return "", fmt.Errorf("directory %s is contained in a module that is not one of the workspace modules listed in go.work. You can add the module to the workspace using go work use %s", base.ShortPath(absDir), base.ShortPath(mr))
} }
return "", fmt.Errorf("directory %s outside %s", base.ShortPath(absDir), scope) return "", fmt.Errorf("directory %s outside modules listed in go.work or their selected dependencies", base.ShortPath(absDir))
}
return "", fmt.Errorf("directory %s outside main module or its selected dependencies", base.ShortPath(absDir))
} }
return pkg, nil return pkg, nil
} }

View file

@ -22,7 +22,6 @@ import (
"sync" "sync"
"time" "time"
"cmd/go/internal/base"
"cmd/go/internal/cfg" "cmd/go/internal/cfg"
"cmd/go/internal/search" "cmd/go/internal/search"
"cmd/go/internal/str" "cmd/go/internal/str"
@ -657,7 +656,6 @@ func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([
cmd := exec.Command(v.Cmd, args...) cmd := exec.Command(v.Cmd, args...)
cmd.Dir = dir cmd.Dir = dir
cmd.Env = base.AppendPWD(os.Environ(), cmd.Dir)
if cfg.BuildX { if cfg.BuildX {
fmt.Fprintf(os.Stderr, "cd %s\n", dir) fmt.Fprintf(os.Stderr, "cd %s\n", dir)
fmt.Fprintf(os.Stderr, "%s %s\n", v.Cmd, strings.Join(args, " ")) fmt.Fprintf(os.Stderr, "%s %s\n", v.Cmd, strings.Join(args, " "))
@ -669,7 +667,7 @@ func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([
if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
os.Stderr.Write(ee.Stderr) os.Stderr.Write(ee.Stderr)
} else { } else {
fmt.Fprintf(os.Stderr, err.Error()) fmt.Fprintln(os.Stderr, err.Error())
} }
} }
} }
@ -678,14 +676,24 @@ func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([
// Ping pings to determine scheme to use. // Ping pings to determine scheme to use.
func (v *Cmd) Ping(scheme, repo string) error { func (v *Cmd) Ping(scheme, repo string) error {
return v.runVerboseOnly(".", v.PingCmd, "scheme", scheme, "repo", repo) // Run the ping command in an arbitrary working directory,
// but don't let the current working directory pollute the results.
// In module mode, we expect GOMODCACHE to exist and be a safe place for
// commands; in GOPATH mode, we expect that to be true of GOPATH/src.
dir := cfg.GOMODCACHE
if !cfg.ModulesEnabled {
dir = filepath.Join(cfg.BuildContext.GOPATH, "src")
}
os.MkdirAll(dir, 0777) // Ignore errors — if unsuccessful, the command will likely fail.
return v.runVerboseOnly(dir, v.PingCmd, "scheme", scheme, "repo", repo)
} }
// Create creates a new copy of repo in dir. // Create creates a new copy of repo in dir.
// The parent of dir must exist; dir must not. // The parent of dir must exist; dir must not.
func (v *Cmd) Create(dir, repo string) error { func (v *Cmd) Create(dir, repo string) error {
for _, cmd := range v.CreateCmd { for _, cmd := range v.CreateCmd {
if err := v.run(".", cmd, "dir", dir, "repo", repo); err != nil { if err := v.run(filepath.Dir(dir), cmd, "dir", dir, "repo", repo); err != nil {
return err return err
} }
} }

View file

@ -160,7 +160,6 @@ func (b *Builder) toolID(name string) string {
cmdline := str.StringList(cfg.BuildToolexec, path, "-V=full") cmdline := str.StringList(cfg.BuildToolexec, path, "-V=full")
cmd := exec.Command(cmdline[0], cmdline[1:]...) cmd := exec.Command(cmdline[0], cmdline[1:]...)
cmd.Env = base.AppendPWD(os.Environ(), cmd.Dir)
var stdout, stderr bytes.Buffer var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout cmd.Stdout = &stdout
cmd.Stderr = &stderr cmd.Stderr = &stderr
@ -219,9 +218,8 @@ func (b *Builder) gccToolID(name, language string) (string, error) {
// compile an empty file on standard input. // compile an empty file on standard input.
cmdline := str.StringList(cfg.BuildToolexec, name, "-###", "-x", language, "-c", "-") cmdline := str.StringList(cfg.BuildToolexec, name, "-###", "-x", language, "-c", "-")
cmd := exec.Command(cmdline[0], cmdline[1:]...) cmd := exec.Command(cmdline[0], cmdline[1:]...)
cmd.Env = base.AppendPWD(os.Environ(), cmd.Dir)
// Force untranslated output so that we see the string "version". // Force untranslated output so that we see the string "version".
cmd.Env = append(cmd.Env, "LC_ALL=C") cmd.Env = append(os.Environ(), "LC_ALL=C")
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if err != nil { if err != nil {
return "", fmt.Errorf("%s: %v; output: %q", name, err, out) return "", fmt.Errorf("%s: %v; output: %q", name, err, out)

View file

@ -2116,8 +2116,10 @@ func (b *Builder) runOut(a *Action, dir string, env []string, cmdargs ...any) ([
cmd.Stderr = &buf cmd.Stderr = &buf
cleanup := passLongArgsInResponseFiles(cmd) cleanup := passLongArgsInResponseFiles(cmd)
defer cleanup() defer cleanup()
if dir != "." {
cmd.Dir = dir cmd.Dir = dir
cmd.Env = base.AppendPWD(os.Environ(), cmd.Dir) }
cmd.Env = cmd.Environ() // Pre-allocate with correct PWD.
// Add the TOOLEXEC_IMPORTPATH environment variable for -toolexec tools. // Add the TOOLEXEC_IMPORTPATH environment variable for -toolexec tools.
// It doesn't really matter if -toolexec isn't being used. // It doesn't really matter if -toolexec isn't being used.
@ -2606,8 +2608,7 @@ func (b *Builder) gccSupportsFlag(compiler []string, flag string) bool {
} }
cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
cmd.Dir = b.WorkDir cmd.Dir = b.WorkDir
cmd.Env = base.AppendPWD(os.Environ(), cmd.Dir) cmd.Env = append(cmd.Environ(), "LC_ALL=C")
cmd.Env = append(cmd.Env, "LC_ALL=C")
out, _ := cmd.CombinedOutput() out, _ := cmd.CombinedOutput()
// GCC says "unrecognized command line option". // GCC says "unrecognized command line option".
// clang says "unknown argument". // clang says "unknown argument".
@ -3071,7 +3072,7 @@ var (
) )
func (b *Builder) swigDoVersionCheck() error { func (b *Builder) swigDoVersionCheck() error {
out, err := b.runOut(nil, "", nil, "swig", "-version") out, err := b.runOut(nil, ".", nil, "swig", "-version")
if err != nil { if err != nil {
return err return err
} }

View file

@ -21,6 +21,11 @@ cmp stdout want-json-name.txt
go list -json=ImportPath,Name,GoFiles,Imports go list -json=ImportPath,Name,GoFiles,Imports
cmp stdout want-json-multiple.txt cmp stdout want-json-multiple.txt
# Test that -json=<field> with Deps outputs the Deps field.
go list -json=Deps
stdout '"Deps": \['
stdout '"errors",'
-- go.mod -- -- go.mod --
module example.com/a module example.com/a

View file

@ -10,7 +10,7 @@ env GOSUMDB=off
# For a while, (*modfetch.codeRepo).Stat was not checking for a go.mod file, # For a while, (*modfetch.codeRepo).Stat was not checking for a go.mod file,
# which would produce a hard error at the subsequent call to GoMod. # which would produce a hard error at the subsequent call to GoMod.
go get go get -v
-- go.mod -- -- go.mod --
module example.com module example.com

View file

@ -6,8 +6,8 @@
! go list ./... ! go list ./...
stderr 'pattern ./...: directory prefix . does not contain modules listed in go.work or their selected dependencies' stderr 'pattern ./...: directory prefix . does not contain modules listed in go.work or their selected dependencies'
! go list ./a ! go list ./a/c
stderr 'directory a outside modules listed in go.work' stderr 'directory a[\\/]c is contained in a module that is not one of the workspace modules listed in go.work. You can add the module to the workspace using go work use a'
-- go.work -- -- go.work --
go 1.18 go 1.18
@ -19,6 +19,8 @@ module example.com/a
go 1.18 go 1.18
-- a/a.go -- -- a/a.go --
package a package a
-- a/c/c.go --
package c
-- b/go.mod -- -- b/go.mod --
module example.com/b module example.com/b

View file

@ -76,6 +76,11 @@ func initParserMode() {
if *allErrors { if *allErrors {
parserMode |= parser.AllErrors parserMode |= parser.AllErrors
} }
// Both -r and -s make use of go/ast's object resolution.
// If neither is being used, avoid that unnecessary work.
if *rewriteRule == "" && !*simplifyAST {
parserMode |= parser.SkipObjectResolution
}
} }
func isGoFile(f fs.DirEntry) bool { func isGoFile(f fs.DirEntry) bool {
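For context (an editor's sketch, not part of the change): parser.SkipObjectResolution is a go/parser mode bit that skips resolving identifiers to ast.Objects, which a tool can set whenever it only needs the syntax tree, for example:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\nfunc f() {}\n"
	// Skip object resolution since we only inspect the syntax tree.
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments|parser.SkipObjectResolution)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(f.Name.Name, len(f.Decls))
}
```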

View file

@ -726,11 +726,13 @@ func genFuncInfoSyms(ctxt *Link) {
} }
o.Write(&b) o.Write(&b)
p := b.Bytes()
isym := &LSym{ isym := &LSym{
Type: objabi.SDATA, // for now, I don't think it matters Type: objabi.SDATA, // for now, I don't think it matters
PkgIdx: goobj.PkgIdxSelf, PkgIdx: goobj.PkgIdxSelf,
SymIdx: symidx, SymIdx: symidx,
P: append([]byte(nil), b.Bytes()...), P: append([]byte(nil), p...),
Size: int64(len(p)),
} }
isym.Set(AttrIndexed, true) isym.Set(AttrIndexed, true)
symidx++ symidx++

View file

@ -101,7 +101,7 @@ func (ctxt *Link) doStackCheck() {
// the same function multiple times at different // the same function multiple times at different
// depths, but lets us find all paths. // depths, but lets us find all paths.
for _, root := range roots { for _, root := range roots {
ctxt.Errorf(root, "nosplit stack overflow") ctxt.Errorf(root, "nosplit stack over %d byte limit", limit)
chain := []stackCheckChain{{stackCheckEdge{0, root}, false}} chain := []stackCheckChain{{stackCheckEdge{0, root}, false}}
sc.report(root, limit, &chain) sc.report(root, limit, &chain)
} }

View file

@ -5,13 +5,12 @@
package ld package ld
import ( import (
"cmd/internal/objabi"
"cmd/internal/sys"
"fmt" "fmt"
"internal/testenv" "internal/testenv"
"os" "os"
"os/exec" "os/exec"
"regexp" "regexp"
"strconv"
"testing" "testing"
) )
@ -24,7 +23,7 @@ func TestStackCheckOutput(t *testing.T) {
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", os.DevNull, "./testdata/stackcheck") cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", os.DevNull, "./testdata/stackcheck")
// The rules for computing frame sizes on all of the // The rules for computing frame sizes on all of the
// architectures are complicated, so just do this on amd64. // architectures are complicated, so just do this on amd64.
cmd.Env = append(os.Environ(), "GOARCH=amd64") cmd.Env = append(os.Environ(), "GOARCH=amd64", "GOOS=linux")
outB, err := cmd.CombinedOutput() outB, err := cmd.CombinedOutput()
if err == nil { if err == nil {
@ -34,13 +33,13 @@ func TestStackCheckOutput(t *testing.T) {
t.Logf("linker output:\n%s", out) t.Logf("linker output:\n%s", out)
// Construct expected stanzas // Get expected limit.
arch := sys.ArchAMD64 limitRe := regexp.MustCompile("nosplit stack over ([0-9]+) byte limit")
call := 0 m := limitRe.FindStringSubmatch(out)
if !arch.HasLR { if m == nil {
call = arch.RegSize t.Fatalf("no overflow errors in output")
} }
limit := objabi.StackLimit - call limit, _ := strconv.Atoi(m[1])
wantMap := map[string]string{ wantMap := map[string]string{
"main.startSelf": fmt.Sprintf( "main.startSelf": fmt.Sprintf(
@ -67,7 +66,7 @@ func TestStackCheckOutput(t *testing.T) {
} }
// Parse stanzas // Parse stanzas
stanza := regexp.MustCompile(`^(.*): nosplit stack overflow\n(.*\n(?: .*\n)*)`) stanza := regexp.MustCompile(`^(.*): nosplit stack over [0-9]+ byte limit\n(.*\n(?: .*\n)*)`)
// Strip comments from cmd/go // Strip comments from cmd/go
out = regexp.MustCompile(`(?m)^#.*\n`).ReplaceAllString(out, "") out = regexp.MustCompile(`(?m)^#.*\n`).ReplaceAllString(out, "")
for len(out) > 0 { for len(out) > 0 {

View file

@ -17,7 +17,7 @@ The file was generated by catapult's `vulcanize_trace_viewer` command.
$ git clone https://chromium.googlesource.com/catapult $ git clone https://chromium.googlesource.com/catapult
$ cd catapult $ cd catapult
$ ./tracing/bin/vulcanize_trace_viewer --config=full $ ./tracing/bin/vulcanize_trace_viewer --config=full
$ cp tracing/bin/trace_viewer_full.html $GOROOT/misc/trace/trace_viewer_full.html $ cp tracing/bin/trace_viewer_full.html $GOROOT/src/cmd/trace/static/trace_viewer_full.html
``` ```
We are supposed to use --config=lean (produces smaller html), We are supposed to use --config=lean (produces smaller html),
@ -31,7 +31,7 @@ to import the `trace_viewer_full.html`.
This is copied from the catapult repo. This is copied from the catapult repo.
``` ```
$ cp third_party/polymer/components/webcomponentsjs/webcomponents.min.js $GOROOT/misc/trace/webcomponents.min.js $ cp third_party/polymer/components/webcomponentsjs/webcomponents.min.js $GOROOT/src/cmd/trace/static/webcomponents.min.js
``` ```
## Licenses ## Licenses

View file

@ -6,6 +6,7 @@ package main
import ( import (
"cmd/internal/traceviewer" "cmd/internal/traceviewer"
"embed"
"encoding/json" "encoding/json"
"fmt" "fmt"
"internal/trace" "internal/trace"
@ -13,8 +14,6 @@ import (
"log" "log"
"math" "math"
"net/http" "net/http"
"path/filepath"
"runtime"
"runtime/debug" "runtime/debug"
"sort" "sort"
"strconv" "strconv"
@ -22,13 +21,16 @@ import (
"time" "time"
) )
//go:embed static/trace_viewer_full.html static/webcomponents.min.js
var staticContent embed.FS
func init() { func init() {
http.HandleFunc("/trace", httpTrace) http.HandleFunc("/trace", httpTrace)
http.HandleFunc("/jsontrace", httpJsonTrace) http.HandleFunc("/jsontrace", httpJsonTrace)
http.HandleFunc("/trace_viewer_html", httpTraceViewerHTML) http.Handle("/static/", http.FileServer(http.FS(staticContent)))
http.HandleFunc("/webcomponents.min.js", webcomponentsJS)
} }
// httpTrace serves either whole trace (goid==0) or trace for goid goroutine. // httpTrace serves either whole trace (goid==0) or trace for goid goroutine.
func httpTrace(w http.ResponseWriter, r *http.Request) { func httpTrace(w http.ResponseWriter, r *http.Request) {
_, err := parseTrace() _, err := parseTrace()
@ -50,19 +52,19 @@ func httpTrace(w http.ResponseWriter, r *http.Request) {
var templTrace = ` var templTrace = `
<html> <html>
<head> <head>
<script src="/webcomponents.min.js"></script> <script src="/static/webcomponents.min.js"></script>
<script> <script>
'use strict'; 'use strict';
function onTraceViewerImportFail() { function onTraceViewerImportFail() {
document.addEventListener('DOMContentLoaded', function() { document.addEventListener('DOMContentLoaded', function() {
document.body.textContent = document.body.textContent =
'/trace_viewer_full.html is missing. File a bug in https://golang.org/issue'; '/static/trace_viewer_full.html is missing. File a bug in https://golang.org/issue';
}); });
} }
</script> </script>
<link rel="import" href="/trace_viewer_html" <link rel="import" href="/static/trace_viewer_full.html"
onerror="onTraceViewerImportFail(event)"> onerror="onTraceViewerImportFail(event)">
<style type="text/css"> <style type="text/css">
@ -173,16 +175,6 @@ function onTraceViewerImportFail() {
</html> </html>
` `
// httpTraceViewerHTML serves static part of trace-viewer.
// This URL is queried from templTrace HTML.
func httpTraceViewerHTML(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, filepath.Join(runtime.GOROOT(), "misc", "trace", "trace_viewer_full.html"))
}
func webcomponentsJS(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, filepath.Join(runtime.GOROOT(), "misc", "trace", "webcomponents.min.js"))
}
// httpJsonTrace serves json trace, requested from within templTrace HTML. // httpJsonTrace serves json trace, requested from within templTrace HTML.
func httpJsonTrace(w http.ResponseWriter, r *http.Request) { func httpJsonTrace(w http.ResponseWriter, r *http.Request) {
defer debug.FreeOSMemory() defer debug.FreeOSMemory()
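The embed-based serving pattern used above is plain standard-library functionality; a minimal standalone sketch (illustrative directory name and address, not the trace tool's actual layout):

```go
package main

import (
	"embed"
	"log"
	"net/http"
)

//go:embed static
var staticContent embed.FS

func main() {
	// Files are served under the same /static/ prefix they have inside the FS,
	// so no StripPrefix is needed.
	http.Handle("/static/", http.FileServer(http.FS(staticContent)))
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```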

View file

@ -146,7 +146,7 @@ func randFieldElement(c elliptic.Curve, rand io.Reader) (k *big.Int, err error)
params := c.Params() params := c.Params()
// Note that for P-521 this will actually be 63 bits more than the order, as // Note that for P-521 this will actually be 63 bits more than the order, as
// division rounds down, but the extra bit is inconsequential. // division rounds down, but the extra bit is inconsequential.
b := make([]byte, params.BitSize/8+8) // TODO: use params.N.BitLen() b := make([]byte, params.N.BitLen()/8+8)
_, err = io.ReadFull(rand, b) _, err = io.ReadFull(rand, b)
if err != nil { if err != nil {
return return
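A quick check of the "63 bits more" remark above (editor's note, not part of the change):

```go
package main

import (
	"crypto/elliptic"
	"fmt"
)

func main() {
	n := elliptic.P521().Params().N
	b := n.BitLen()/8 + 8          // 521/8 + 8 = 65 + 8 = 73 bytes
	fmt.Println(b*8 - n.BitLen())  // 584 - 521 = 63 extra bits
}
```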
@ -264,13 +264,13 @@ func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err err
// Create a CSPRNG that xors a stream of zeros with // Create a CSPRNG that xors a stream of zeros with
// the output of the AES-CTR instance. // the output of the AES-CTR instance.
csprng := cipher.StreamReader{ csprng := &cipher.StreamReader{
R: zeroReader, R: zeroReader,
S: cipher.NewCTR(block, []byte(aesIV)), S: cipher.NewCTR(block, []byte(aesIV)),
} }
c := priv.PublicKey.Curve c := priv.PublicKey.Curve
return sign(priv, &csprng, c, hash) return sign(priv, csprng, c, hash)
} }
func signGeneric(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, hash []byte) (r, s *big.Int, err error) { func signGeneric(priv *PrivateKey, csprng *cipher.StreamReader, c elliptic.Curve, hash []byte) (r, s *big.Int, err error) {
@ -398,16 +398,14 @@ func VerifyASN1(pub *PublicKey, hash, sig []byte) bool {
return Verify(pub, hash, r, s) return Verify(pub, hash, r, s)
} }
type zr struct { type zr struct{}
io.Reader
}
// Read replaces the contents of dst with zeros. // Read replaces the contents of dst with zeros. It is safe for concurrent use.
func (z *zr) Read(dst []byte) (n int, err error) { func (zr) Read(dst []byte) (n int, err error) {
for i := range dst { for i := range dst {
dst[i] = 0 dst[i] = 0
} }
return len(dst), nil return len(dst), nil
} }
var zeroReader = &zr{} var zeroReader = zr{}
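To make the zero-reader's role concrete (an editor's sketch with an illustrative key, not code from this change): XORing a stream of zeros with AES-CTR output yields the raw keystream, so the StreamReader acts as a deterministic CSPRNG seeded by the AES key:

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
	"io"
)

type zeroes struct{}

func (zeroes) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

func main() {
	key := make([]byte, 32) // illustrative key; the real code derives it from the private key and digest
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	csprng := &cipher.StreamReader{
		R: zeroes{},
		S: cipher.NewCTR(block, make([]byte, aes.BlockSize)),
	}
	out := make([]byte, 16)
	if _, err := io.ReadFull(csprng, out); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out) // first 16 keystream bytes
}
```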

View file

@ -74,11 +74,22 @@ func TestEd25519Vectors(t *testing.T) {
func downloadEd25519Vectors(t *testing.T) []byte { func downloadEd25519Vectors(t *testing.T) []byte {
testenv.MustHaveExternalNetwork(t) testenv.MustHaveExternalNetwork(t)
// Create a temp dir and modcache subdir.
d := t.TempDir()
// Create a spot for the modcache.
modcache := filepath.Join(d, "modcache")
if err := os.Mkdir(modcache, 0777); err != nil {
t.Fatal(err)
}
t.Setenv("GO111MODULE", "on")
t.Setenv("GOMODCACHE", modcache)
// Download the JSON test file from the GOPROXY with `go mod download`, // Download the JSON test file from the GOPROXY with `go mod download`,
// pinning the version so test and module caching works as expected. // pinning the version so test and module caching works as expected.
goTool := testenv.GoToolPath(t) goTool := testenv.GoToolPath(t)
path := "filippo.io/mostly-harmless/ed25519vectors@v0.0.0-20210322192420-30a2d7243a94" path := "filippo.io/mostly-harmless/ed25519vectors@v0.0.0-20210322192420-30a2d7243a94"
cmd := exec.Command(goTool, "mod", "download", "-json", path) cmd := exec.Command(goTool, "mod", "download", "-modcacherw", "-json", path)
// TODO: enable the sumdb once the TryBots proxy supports it. // TODO: enable the sumdb once the TryBots proxy supports it.
cmd.Env = append(os.Environ(), "GONOSUMDB=*") cmd.Env = append(os.Environ(), "GONOSUMDB=*")
output, err := cmd.Output() output, err := cmd.Output()

View file

@ -36,295 +36,6 @@ type Curve interface {
ScalarBaseMult(k []byte) (x, y *big.Int) ScalarBaseMult(k []byte) (x, y *big.Int)
} }
func matchesSpecificCurve(params *CurveParams, available ...Curve) (Curve, bool) {
for _, c := range available {
if params == c.Params() {
return c, true
}
}
return nil, false
}
// CurveParams contains the parameters of an elliptic curve and also provides
// a generic, non-constant time implementation of Curve.
type CurveParams struct {
P *big.Int // the order of the underlying field
N *big.Int // the order of the base point
B *big.Int // the constant of the curve equation
Gx, Gy *big.Int // (x,y) of the base point
BitSize int // the size of the underlying field
Name string // the canonical name of the curve
}
func (curve *CurveParams) Params() *CurveParams {
return curve
}
// CurveParams operates, internally, on Jacobian coordinates. For a given
// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
// calculation can be performed within the transform (as in ScalarMult and
// ScalarBaseMult). But even for Add and Double, it's faster to apply and
// reverse the transform than to operate in affine coordinates.
// polynomial returns x³ - 3x + b.
func (curve *CurveParams) polynomial(x *big.Int) *big.Int {
x3 := new(big.Int).Mul(x, x)
x3.Mul(x3, x)
threeX := new(big.Int).Lsh(x, 1)
threeX.Add(threeX, x)
x3.Sub(x3, threeX)
x3.Add(x3, curve.B)
x3.Mod(x3, curve.P)
return x3
}
func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
return specific.IsOnCurve(x, y)
}
if x.Sign() < 0 || x.Cmp(curve.P) >= 0 ||
y.Sign() < 0 || y.Cmp(curve.P) >= 0 {
return false
}
// y² = x³ - 3x + b
y2 := new(big.Int).Mul(y, y)
y2.Mod(y2, curve.P)
return curve.polynomial(x).Cmp(y2) == 0
}
// zForAffine returns a Jacobian Z value for the affine point (x, y). If x and
// y are zero, it assumes that they represent the point at infinity because (0,
// 0) is not on any of the curves handled here.
func zForAffine(x, y *big.Int) *big.Int {
z := new(big.Int)
if x.Sign() != 0 || y.Sign() != 0 {
z.SetInt64(1)
}
return z
}
// affineFromJacobian reverses the Jacobian transform. See the comment at the
// top of the file. If the point is ∞ it returns 0, 0.
func (curve *CurveParams) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
if z.Sign() == 0 {
return new(big.Int), new(big.Int)
}
zinv := new(big.Int).ModInverse(z, curve.P)
zinvsq := new(big.Int).Mul(zinv, zinv)
xOut = new(big.Int).Mul(x, zinvsq)
xOut.Mod(xOut, curve.P)
zinvsq.Mul(zinvsq, zinv)
yOut = new(big.Int).Mul(y, zinvsq)
yOut.Mod(yOut, curve.P)
return
}
func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
return specific.Add(x1, y1, x2, y2)
}
z1 := zForAffine(x1, y1)
z2 := zForAffine(x2, y2)
return curve.affineFromJacobian(curve.addJacobian(x1, y1, z1, x2, y2, z2))
}
// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
// (x2, y2, z2) and returns their sum, also in Jacobian form.
func (curve *CurveParams) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
// See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
x3, y3, z3 := new(big.Int), new(big.Int), new(big.Int)
if z1.Sign() == 0 {
x3.Set(x2)
y3.Set(y2)
z3.Set(z2)
return x3, y3, z3
}
if z2.Sign() == 0 {
x3.Set(x1)
y3.Set(y1)
z3.Set(z1)
return x3, y3, z3
}
z1z1 := new(big.Int).Mul(z1, z1)
z1z1.Mod(z1z1, curve.P)
z2z2 := new(big.Int).Mul(z2, z2)
z2z2.Mod(z2z2, curve.P)
u1 := new(big.Int).Mul(x1, z2z2)
u1.Mod(u1, curve.P)
u2 := new(big.Int).Mul(x2, z1z1)
u2.Mod(u2, curve.P)
h := new(big.Int).Sub(u2, u1)
xEqual := h.Sign() == 0
if h.Sign() == -1 {
h.Add(h, curve.P)
}
i := new(big.Int).Lsh(h, 1)
i.Mul(i, i)
j := new(big.Int).Mul(h, i)
s1 := new(big.Int).Mul(y1, z2)
s1.Mul(s1, z2z2)
s1.Mod(s1, curve.P)
s2 := new(big.Int).Mul(y2, z1)
s2.Mul(s2, z1z1)
s2.Mod(s2, curve.P)
r := new(big.Int).Sub(s2, s1)
if r.Sign() == -1 {
r.Add(r, curve.P)
}
yEqual := r.Sign() == 0
if xEqual && yEqual {
return curve.doubleJacobian(x1, y1, z1)
}
r.Lsh(r, 1)
v := new(big.Int).Mul(u1, i)
x3.Set(r)
x3.Mul(x3, x3)
x3.Sub(x3, j)
x3.Sub(x3, v)
x3.Sub(x3, v)
x3.Mod(x3, curve.P)
y3.Set(r)
v.Sub(v, x3)
y3.Mul(y3, v)
s1.Mul(s1, j)
s1.Lsh(s1, 1)
y3.Sub(y3, s1)
y3.Mod(y3, curve.P)
z3.Add(z1, z2)
z3.Mul(z3, z3)
z3.Sub(z3, z1z1)
z3.Sub(z3, z2z2)
z3.Mul(z3, h)
z3.Mod(z3, curve.P)
return x3, y3, z3
}
func (curve *CurveParams) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
return specific.Double(x1, y1)
}
z1 := zForAffine(x1, y1)
return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
}
// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
// returns its double, also in Jacobian form.
func (curve *CurveParams) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
// See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
delta := new(big.Int).Mul(z, z)
delta.Mod(delta, curve.P)
gamma := new(big.Int).Mul(y, y)
gamma.Mod(gamma, curve.P)
alpha := new(big.Int).Sub(x, delta)
if alpha.Sign() == -1 {
alpha.Add(alpha, curve.P)
}
alpha2 := new(big.Int).Add(x, delta)
alpha.Mul(alpha, alpha2)
alpha2.Set(alpha)
alpha.Lsh(alpha, 1)
alpha.Add(alpha, alpha2)
beta := alpha2.Mul(x, gamma)
x3 := new(big.Int).Mul(alpha, alpha)
beta8 := new(big.Int).Lsh(beta, 3)
beta8.Mod(beta8, curve.P)
x3.Sub(x3, beta8)
if x3.Sign() == -1 {
x3.Add(x3, curve.P)
}
x3.Mod(x3, curve.P)
z3 := new(big.Int).Add(y, z)
z3.Mul(z3, z3)
z3.Sub(z3, gamma)
if z3.Sign() == -1 {
z3.Add(z3, curve.P)
}
z3.Sub(z3, delta)
if z3.Sign() == -1 {
z3.Add(z3, curve.P)
}
z3.Mod(z3, curve.P)
beta.Lsh(beta, 2)
beta.Sub(beta, x3)
if beta.Sign() == -1 {
beta.Add(beta, curve.P)
}
y3 := alpha.Mul(alpha, beta)
gamma.Mul(gamma, gamma)
gamma.Lsh(gamma, 3)
gamma.Mod(gamma, curve.P)
y3.Sub(y3, gamma)
if y3.Sign() == -1 {
y3.Add(y3, curve.P)
}
y3.Mod(y3, curve.P)
return x3, y3, z3
}
func (curve *CurveParams) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p256, p384, p521); ok {
return specific.ScalarMult(Bx, By, k)
}
Bz := new(big.Int).SetInt64(1)
x, y, z := new(big.Int), new(big.Int), new(big.Int)
for _, byte := range k {
for bitNum := 0; bitNum < 8; bitNum++ {
x, y, z = curve.doubleJacobian(x, y, z)
if byte&0x80 == 0x80 {
x, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)
}
byte <<= 1
}
}
return curve.affineFromJacobian(x, y, z)
}
func (curve *CurveParams) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p256, p384, p521); ok {
return specific.ScalarBaseMult(k)
}
return curve.ScalarMult(curve.Gx, curve.Gy, k)
}
var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f} var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
// GenerateKey returns a public/private key pair. The private key is // GenerateKey returns a public/private key pair. The private key is

View file

@ -0,0 +1,223 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elliptic
import (
"crypto/elliptic/internal/nistec"
"crypto/rand"
"errors"
"math/big"
)
var p224 = &nistCurve[*nistec.P224Point]{
newPoint: nistec.NewP224Point,
newGenerator: nistec.NewP224Generator,
}
func initP224() {
p224.params = &CurveParams{
Name: "P-224",
BitSize: 224,
// FIPS 186-4, section D.1.2.2
P: bigFromDecimal("26959946667150639794667015087019630673557916260026308143510066298881"),
N: bigFromDecimal("26959946667150639794667015087019625940457807714424391721682722368061"),
B: bigFromHex("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4"),
Gx: bigFromHex("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21"),
Gy: bigFromHex("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34"),
}
}
var p384 = &nistCurve[*nistec.P384Point]{
newPoint: nistec.NewP384Point,
newGenerator: nistec.NewP384Generator,
}
func initP384() {
p384.params = &CurveParams{
Name: "P-384",
BitSize: 384,
// FIPS 186-4, section D.1.2.4
P: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
"46667948293404245721771496870329047266088258938001861606973112319"),
N: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
"46667946905279627659399113263569398956308152294913554433653942643"),
B: bigFromHex("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088" +
"f5013875ac656398d8a2ed19d2a85c8edd3ec2aef"),
Gx: bigFromHex("aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741" +
"e082542a385502f25dbf55296c3a545e3872760ab7"),
Gy: bigFromHex("3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da31" +
"13b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f"),
}
}
var p521 = &nistCurve[*nistec.P521Point]{
newPoint: nistec.NewP521Point,
newGenerator: nistec.NewP521Generator,
}
func initP521() {
p521.params = &CurveParams{
Name: "P-521",
BitSize: 521,
// FIPS 186-4, section D.1.2.5
P: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
"0540939446345918554318339765605212255964066145455497729631139148" +
"0858037121987999716643812574028291115057151"),
N: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
"0540939446345918554318339765539424505774633321719753296399637136" +
"3321113864768612440380340372808892707005449"),
B: bigFromHex("0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8" +
"b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef" +
"451fd46b503f00"),
Gx: bigFromHex("00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f8" +
"28af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf9" +
"7e7e31c2e5bd66"),
Gy: bigFromHex("011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817" +
"afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088" +
"be94769fd16650"),
}
}
// nistCurve is a Curve implementation based on a nistec Point.
//
// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
// legacy idiosyncrasies it requires, such as invalid and infinity point
// handling.
//
// To interact with the nistec package, points are encoded into and decoded from
// properly formatted byte slices. All big.Int use is limited to this package.
// Encoding and decoding is 1/1000th of the runtime of a scalar multiplication,
// so the overhead is acceptable.
type nistCurve[Point nistPoint[Point]] struct {
newPoint func() Point
newGenerator func() Point
params *CurveParams
}
// nistPoint is a generic constraint for the nistec Point types.
type nistPoint[T any] interface {
Bytes() []byte
SetBytes([]byte) (T, error)
Add(T, T) T
Double(T) T
ScalarMult(T, []byte) T
}
func (curve *nistCurve[Point]) Params() *CurveParams {
return curve.params
}
func (curve *nistCurve[Point]) IsOnCurve(x, y *big.Int) bool {
// IsOnCurve is documented to reject (0, 0), the conventional point at
// infinity, which however is accepted by pointFromAffine.
if x.Sign() == 0 && y.Sign() == 0 {
return false
}
_, err := curve.pointFromAffine(x, y)
return err == nil
}
func (curve *nistCurve[Point]) pointFromAffine(x, y *big.Int) (p Point, err error) {
p = curve.newPoint()
// (0, 0) is by convention the point at infinity, which can't be represented
// in affine coordinates. See Issue 37294.
if x.Sign() == 0 && y.Sign() == 0 {
return p, nil
}
// Reject values that would not get correctly encoded.
if x.Sign() < 0 || y.Sign() < 0 {
return p, errors.New("negative coordinate")
}
if x.BitLen() > curve.params.BitSize || y.BitLen() > curve.params.BitSize {
return p, errors.New("overflowing coordinate")
}
// Encode the coordinates and let SetBytes reject invalid points.
byteLen := (curve.params.BitSize + 7) / 8
buf := make([]byte, 1+2*byteLen)
buf[0] = 4 // uncompressed point
x.FillBytes(buf[1 : 1+byteLen])
y.FillBytes(buf[1+byteLen : 1+2*byteLen])
return p.SetBytes(buf)
}
func (curve *nistCurve[Point]) pointToAffine(p Point) (x, y *big.Int) {
out := p.Bytes()
if len(out) == 1 && out[0] == 0 {
// This is the correct encoding of the point at infinity, which
// Unmarshal does not support. See Issue 37294.
return new(big.Int), new(big.Int)
}
x, y = Unmarshal(curve, out)
if x == nil {
panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
}
return x, y
}
// randomPoint returns a random point on the curve. It's used when Add,
// Double, or ScalarMult are fed a point not on the curve, which is undefined
// behavior. Originally, we used to do the math on it anyway (which allows
// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
// happening in the first place. Now, we just can't construct a nistec Point
// for an invalid pair of coordinates, because that API is safer. If we panic,
// we risk introducing a DoS. If we return nil, we risk a panic. If we return
// the input, ecdsa.Verify might fail open. The safest course seems to be to
// return a valid, random point, which hopefully won't help the attacker.
func (curve *nistCurve[Point]) randomPoint() (x, y *big.Int) {
_, x, y, err := GenerateKey(curve, rand.Reader)
if err != nil {
panic("crypto/elliptic: failed to generate random point")
}
return x, y
}
func (curve *nistCurve[Point]) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
p1, err := curve.pointFromAffine(x1, y1)
if err != nil {
return curve.randomPoint()
}
p2, err := curve.pointFromAffine(x2, y2)
if err != nil {
return curve.randomPoint()
}
return curve.pointToAffine(p1.Add(p1, p2))
}
func (curve *nistCurve[Point]) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
p, err := curve.pointFromAffine(x1, y1)
if err != nil {
return curve.randomPoint()
}
return curve.pointToAffine(p.Double(p))
}
func (curve *nistCurve[Point]) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
p, err := curve.pointFromAffine(Bx, By)
if err != nil {
return curve.randomPoint()
}
return curve.pointToAffine(p.ScalarMult(p, scalar))
}
func (curve *nistCurve[Point]) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
p := curve.newGenerator()
return curve.pointToAffine(p.ScalarMult(p, scalar))
}
func bigFromDecimal(s string) *big.Int {
b, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("invalid encoding")
}
return b
}
func bigFromHex(s string) *big.Int {
b, ok := new(big.Int).SetString(s, 16)
if !ok {
panic("invalid encoding")
}
return b
}
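As a usage-level sanity check of the wrapper's byte-slice contract (an editor's sketch, not part of the change): points round-trip through the uncompressed encoding used internally, and the public API is unchanged:

```go
package main

import (
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := elliptic.P224()
	_, x, y, err := elliptic.GenerateKey(curve, rand.Reader)
	if err != nil {
		panic(err)
	}
	// Marshal produces 0x04 || X || Y, the same layout pointFromAffine builds.
	buf := elliptic.Marshal(curve, x, y)
	x2, y2 := elliptic.Unmarshal(curve, buf)
	fmt.Println(curve.IsOnCurve(x2, y2), x.Cmp(x2) == 0, y.Cmp(y2) == 0)
}
```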

View file

@ -1,139 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elliptic
import (
"crypto/elliptic/internal/nistec"
"crypto/rand"
"math/big"
)
// p224Curve is a Curve implementation based on nistec.P224Point.
//
// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
// legacy idiosyncrasies it requires, such as invalid and infinity point
// handling.
//
// To interact with the nistec package, points are encoded into and decoded from
// properly formatted byte slices. All big.Int use is limited to this package.
// Encoding and decoding is 1/1000th of the runtime of a scalar multiplication,
// so the overhead is acceptable.
type p224Curve struct {
params *CurveParams
}
var p224 p224Curve
var _ Curve = p224
func initP224() {
p224.params = &CurveParams{
Name: "P-224",
BitSize: 224,
// FIPS 186-4, section D.1.2.2
P: bigFromDecimal("26959946667150639794667015087019630673557916260026308143510066298881"),
N: bigFromDecimal("26959946667150639794667015087019625940457807714424391721682722368061"),
B: bigFromHex("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4"),
Gx: bigFromHex("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21"),
Gy: bigFromHex("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34"),
}
}
func (curve p224Curve) Params() *CurveParams {
return curve.params
}
func (curve p224Curve) IsOnCurve(x, y *big.Int) bool {
// IsOnCurve is documented to reject (0, 0), the conventional point at
// infinity, which however is accepted by p224PointFromAffine.
if x.Sign() == 0 && y.Sign() == 0 {
return false
}
_, ok := p224PointFromAffine(x, y)
return ok
}
func p224PointFromAffine(x, y *big.Int) (p *nistec.P224Point, ok bool) {
// (0, 0) is by convention the point at infinity, which can't be represented
// in affine coordinates. Marshal incorrectly encodes it as an uncompressed
// point, which SetBytes would correctly reject. See Issue 37294.
if x.Sign() == 0 && y.Sign() == 0 {
return nistec.NewP224Point(), true
}
if x.Sign() < 0 || y.Sign() < 0 {
return nil, false
}
if x.BitLen() > 224 || y.BitLen() > 224 {
return nil, false
}
p, err := nistec.NewP224Point().SetBytes(Marshal(P224(), x, y))
if err != nil {
return nil, false
}
return p, true
}
func p224PointToAffine(p *nistec.P224Point) (x, y *big.Int) {
out := p.Bytes()
if len(out) == 1 && out[0] == 0 {
// This is the correct encoding of the point at infinity, which
// Unmarshal does not support. See Issue 37294.
return new(big.Int), new(big.Int)
}
x, y = Unmarshal(P224(), out)
if x == nil {
panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
}
return x, y
}
// p224RandomPoint returns a random point on the curve. It's used when Add,
// Double, or ScalarMult are fed a point not on the curve, which is undefined
// behavior. Originally, we used to do the math on it anyway (which allows
// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
// happening in the first place. Now, we just can't construct a nistec.P224Point
// for an invalid pair of coordinates, because that API is safer. If we panic,
// we risk introducing a DoS. If we return nil, we risk a panic. If we return
// the input, ecdsa.Verify might fail open. The safest course seems to be to
// return a valid, random point, which hopefully won't help the attacker.
func p224RandomPoint() (x, y *big.Int) {
_, x, y, err := GenerateKey(P224(), rand.Reader)
if err != nil {
panic("crypto/elliptic: failed to generate random point")
}
return x, y
}
func (p224Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
p1, ok := p224PointFromAffine(x1, y1)
if !ok {
return p224RandomPoint()
}
p2, ok := p224PointFromAffine(x2, y2)
if !ok {
return p224RandomPoint()
}
return p224PointToAffine(p1.Add(p1, p2))
}
func (p224Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
p, ok := p224PointFromAffine(x1, y1)
if !ok {
return p224RandomPoint()
}
return p224PointToAffine(p.Double(p))
}
func (p224Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
p, ok := p224PointFromAffine(Bx, By)
if !ok {
return p224RandomPoint()
}
return p224PointToAffine(p.ScalarMult(p, scalar))
}
func (p224Curve) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
p := nistec.NewP224Generator()
return p224PointToAffine(p.ScalarMult(p, scalar))
}

File diff suppressed because it is too large

View file

@ -24,27 +24,18 @@ import (
//go:embed p256_asm_table.bin //go:embed p256_asm_table.bin
var p256Precomputed string var p256Precomputed string
type ( type p256Curve struct {
p256Curve struct {
*CurveParams *CurveParams
} }
p256Point struct { type p256Point struct {
xyz [12]uint64 xyz [12]uint64
} }
)
var p256 p256Curve var p256 p256Curve
func initP256() { func initP256Arch() {
// See FIPS 186-3, section D.2.3 p256 = p256Curve{p256Params}
p256.CurveParams = &CurveParams{Name: "P-256"}
p256.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10)
p256.N, _ = new(big.Int).SetString("115792089210356248762697446949407573529996955224135760342422259061068512044369", 10)
p256.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16)
p256.Gx, _ = new(big.Int).SetString("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", 16)
p256.Gy, _ = new(big.Int).SetString("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", 16)
p256.BitSize = 256
} }
func (curve p256Curve) Params() *CurveParams { func (curve p256Curve) Params() *CurveParams {

View file

@ -1,14 +1,477 @@
// Copyright 2016 The Go Authors. All rights reserved. // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build !amd64 && !s390x && !arm64 && !ppc64le //go:build !amd64 && !arm64
package elliptic package elliptic
var p256 p256Curve // This file contains a constant-time, 32-bit implementation of P256.
func initP256Arch() { import "math/big"
// Use pure Go implementation.
p256 = p256Curve{p256Params} type p256Curve struct {
*CurveParams
}
func (curve p256Curve) Params() *CurveParams {
return curve.CurveParams
}
// p256GetScalar endian-swaps the big-endian scalar value from in and writes it
// to out. If the scalar is equal or greater than the order of the group, it's
// reduced modulo that order.
func p256GetScalar(out *[32]byte, in []byte) {
n := new(big.Int).SetBytes(in)
var scalarBytes []byte
if n.Cmp(p256Params.N) >= 0 || len(in) > len(out) {
n.Mod(n, p256Params.N)
scalarBytes = n.Bytes()
} else {
scalarBytes = in
}
for i, v := range scalarBytes {
out[len(scalarBytes)-(1+i)] = v
}
}
func (p256Curve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
var scalarReversed [32]byte
p256GetScalar(&scalarReversed, scalar)
var x1, y1, z1 [p256Limbs]uint32
p256ScalarBaseMult(&x1, &y1, &z1, &scalarReversed)
return p256ToAffine(&x1, &y1, &z1)
}
func (p256Curve) ScalarMult(bigX, bigY *big.Int, scalar []byte) (x, y *big.Int) {
var scalarReversed [32]byte
p256GetScalar(&scalarReversed, scalar)
var px, py, x1, y1, z1 [p256Limbs]uint32
p256FromBig(&px, bigX)
p256FromBig(&py, bigY)
p256ScalarMult(&x1, &y1, &z1, &px, &py, &scalarReversed)
return p256ToAffine(&x1, &y1, &z1)
}
// p256Precomputed contains precomputed values to aid the calculation of scalar
// multiples of the base point, G. It's actually two, equal length, tables
// concatenated.
//
// The first table contains (x,y) field element pairs for 16 multiples of the
// base point, G.
//
// Index | Index (binary) | Value
// 0 | 0000 | 0G (all zeros, omitted)
// 1 | 0001 | G
// 2 | 0010 | 2**64G
// 3 | 0011 | 2**64G + G
// 4 | 0100 | 2**128G
// 5 | 0101 | 2**128G + G
// 6 | 0110 | 2**128G + 2**64G
// 7 | 0111 | 2**128G + 2**64G + G
// 8 | 1000 | 2**192G
// 9 | 1001 | 2**192G + G
// 10 | 1010 | 2**192G + 2**64G
// 11 | 1011 | 2**192G + 2**64G + G
// 12 | 1100 | 2**192G + 2**128G
// 13 | 1101 | 2**192G + 2**128G + G
// 14 | 1110 | 2**192G + 2**128G + 2**64G
// 15 | 1111 | 2**192G + 2**128G + 2**64G + G
//
// The second table follows the same style, but the terms are 2**32G,
// 2**96G, 2**160G, 2**224G.
//
// This is ~2KB of data.
var p256Precomputed = [p256Limbs * 2 * 15 * 2]uint32{
0x11522878, 0xe730d41, 0xdb60179, 0x4afe2ff, 0x12883add, 0xcaddd88, 0x119e7edc, 0xd4a6eab, 0x3120bee,
0x1d2aac15, 0xf25357c, 0x19e45cdd, 0x5c721d0, 0x1992c5a5, 0xa237487, 0x154ba21, 0x14b10bb, 0xae3fe3,
0xd41a576, 0x922fc51, 0x234994f, 0x60b60d3, 0x164586ae, 0xce95f18, 0x1fe49073, 0x3fa36cc, 0x5ebcd2c,
0xb402f2f, 0x15c70bf, 0x1561925c, 0x5a26704, 0xda91e90, 0xcdc1c7f, 0x1ea12446, 0xe1ade1e, 0xec91f22,
0x26f7778, 0x566847e, 0xa0bec9e, 0x234f453, 0x1a31f21a, 0xd85e75c, 0x56c7109, 0xa267a00, 0xb57c050,
0x98fb57, 0xaa837cc, 0x60c0792, 0xcfa5e19, 0x61bab9e, 0x589e39b, 0xa324c5, 0x7d6dee7, 0x2976e4b,
0x1fc4124a, 0xa8c244b, 0x1ce86762, 0xcd61c7e, 0x1831c8e0, 0x75774e1, 0x1d96a5a9, 0x843a649, 0xc3ab0fa,
0x6e2e7d5, 0x7673a2a, 0x178b65e8, 0x4003e9b, 0x1a1f11c2, 0x7816ea, 0xf643e11, 0x58c43df, 0xf423fc2,
0x19633ffa, 0x891f2b2, 0x123c231c, 0x46add8c, 0x54700dd, 0x59e2b17, 0x172db40f, 0x83e277d, 0xb0dd609,
0xfd1da12, 0x35c6e52, 0x19ede20c, 0xd19e0c0, 0x97d0f40, 0xb015b19, 0x449e3f5, 0xe10c9e, 0x33ab581,
0x56a67ab, 0x577734d, 0x1dddc062, 0xc57b10d, 0x149b39d, 0x26a9e7b, 0xc35df9f, 0x48764cd, 0x76dbcca,
0xca4b366, 0xe9303ab, 0x1a7480e7, 0x57e9e81, 0x1e13eb50, 0xf466cf3, 0x6f16b20, 0x4ba3173, 0xc168c33,
0x15cb5439, 0x6a38e11, 0x73658bd, 0xb29564f, 0x3f6dc5b, 0x53b97e, 0x1322c4c0, 0x65dd7ff, 0x3a1e4f6,
0x14e614aa, 0x9246317, 0x1bc83aca, 0xad97eed, 0xd38ce4a, 0xf82b006, 0x341f077, 0xa6add89, 0x4894acd,
0x9f162d5, 0xf8410ef, 0x1b266a56, 0xd7f223, 0x3e0cb92, 0xe39b672, 0x6a2901a, 0x69a8556, 0x7e7c0,
0x9b7d8d3, 0x309a80, 0x1ad05f7f, 0xc2fb5dd, 0xcbfd41d, 0x9ceb638, 0x1051825c, 0xda0cf5b, 0x812e881,
0x6f35669, 0x6a56f2c, 0x1df8d184, 0x345820, 0x1477d477, 0x1645db1, 0xbe80c51, 0xc22be3e, 0xe35e65a,
0x1aeb7aa0, 0xc375315, 0xf67bc99, 0x7fdd7b9, 0x191fc1be, 0x61235d, 0x2c184e9, 0x1c5a839, 0x47a1e26,
0xb7cb456, 0x93e225d, 0x14f3c6ed, 0xccc1ac9, 0x17fe37f3, 0x4988989, 0x1a90c502, 0x2f32042, 0xa17769b,
0xafd8c7c, 0x8191c6e, 0x1dcdb237, 0x16200c0, 0x107b32a1, 0x66c08db, 0x10d06a02, 0x3fc93, 0x5620023,
0x16722b27, 0x68b5c59, 0x270fcfc, 0xfad0ecc, 0xe5de1c2, 0xeab466b, 0x2fc513c, 0x407f75c, 0xbaab133,
0x9705fe9, 0xb88b8e7, 0x734c993, 0x1e1ff8f, 0x19156970, 0xabd0f00, 0x10469ea7, 0x3293ac0, 0xcdc98aa,
0x1d843fd, 0xe14bfe8, 0x15be825f, 0x8b5212, 0xeb3fb67, 0x81cbd29, 0xbc62f16, 0x2b6fcc7, 0xf5a4e29,
0x13560b66, 0xc0b6ac2, 0x51ae690, 0xd41e271, 0xf3e9bd4, 0x1d70aab, 0x1029f72, 0x73e1c35, 0xee70fbc,
0xad81baf, 0x9ecc49a, 0x86c741e, 0xfe6be30, 0x176752e7, 0x23d416, 0x1f83de85, 0x27de188, 0x66f70b8,
0x181cd51f, 0x96b6e4c, 0x188f2335, 0xa5df759, 0x17a77eb6, 0xfeb0e73, 0x154ae914, 0x2f3ec51, 0x3826b59,
0xb91f17d, 0x1c72949, 0x1362bf0a, 0xe23fddf, 0xa5614b0, 0xf7d8f, 0x79061, 0x823d9d2, 0x8213f39,
0x1128ae0b, 0xd095d05, 0xb85c0c2, 0x1ecb2ef, 0x24ddc84, 0xe35e901, 0x18411a4a, 0xf5ddc3d, 0x3786689,
0x52260e8, 0x5ae3564, 0x542b10d, 0x8d93a45, 0x19952aa4, 0x996cc41, 0x1051a729, 0x4be3499, 0x52b23aa,
0x109f307e, 0x6f5b6bb, 0x1f84e1e7, 0x77a0cfa, 0x10c4df3f, 0x25a02ea, 0xb048035, 0xe31de66, 0xc6ecaa3,
0x28ea335, 0x2886024, 0x1372f020, 0xf55d35, 0x15e4684c, 0xf2a9e17, 0x1a4a7529, 0xcb7beb1, 0xb2a78a1,
0x1ab21f1f, 0x6361ccf, 0x6c9179d, 0xb135627, 0x1267b974, 0x4408bad, 0x1cbff658, 0xe3d6511, 0xc7d76f,
0x1cc7a69, 0xe7ee31b, 0x54fab4f, 0x2b914f, 0x1ad27a30, 0xcd3579e, 0xc50124c, 0x50daa90, 0xb13f72,
0xb06aa75, 0x70f5cc6, 0x1649e5aa, 0x84a5312, 0x329043c, 0x41c4011, 0x13d32411, 0xb04a838, 0xd760d2d,
0x1713b532, 0xbaa0c03, 0x84022ab, 0x6bcf5c1, 0x2f45379, 0x18ae070, 0x18c9e11e, 0x20bca9a, 0x66f496b,
0x3eef294, 0x67500d2, 0xd7f613c, 0x2dbbeb, 0xb741038, 0xe04133f, 0x1582968d, 0xbe985f7, 0x1acbc1a,
0x1a6a939f, 0x33e50f6, 0xd665ed4, 0xb4b7bd6, 0x1e5a3799, 0x6b33847, 0x17fa56ff, 0x65ef930, 0x21dc4a,
0x2b37659, 0x450fe17, 0xb357b65, 0xdf5efac, 0x15397bef, 0x9d35a7f, 0x112ac15f, 0x624e62e, 0xa90ae2f,
0x107eecd2, 0x1f69bbe, 0x77d6bce, 0x5741394, 0x13c684fc, 0x950c910, 0x725522b, 0xdc78583, 0x40eeabb,
0x1fde328a, 0xbd61d96, 0xd28c387, 0x9e77d89, 0x12550c40, 0x759cb7d, 0x367ef34, 0xae2a960, 0x91b8bdc,
0x93462a9, 0xf469ef, 0xb2e9aef, 0xd2ca771, 0x54e1f42, 0x7aaa49, 0x6316abb, 0x2413c8e, 0x5425bf9,
0x1bed3e3a, 0xf272274, 0x1f5e7326, 0x6416517, 0xea27072, 0x9cedea7, 0x6e7633, 0x7c91952, 0xd806dce,
0x8e2a7e1, 0xe421e1a, 0x418c9e1, 0x1dbc890, 0x1b395c36, 0xa1dc175, 0x1dc4ef73, 0x8956f34, 0xe4b5cf2,
0x1b0d3a18, 0x3194a36, 0x6c2641f, 0xe44124c, 0xa2f4eaa, 0xa8c25ba, 0xf927ed7, 0x627b614, 0x7371cca,
0xba16694, 0x417bc03, 0x7c0a7e3, 0x9c35c19, 0x1168a205, 0x8b6b00d, 0x10e3edc9, 0x9c19bf2, 0x5882229,
0x1b2b4162, 0xa5cef1a, 0x1543622b, 0x9bd433e, 0x364e04d, 0x7480792, 0x5c9b5b3, 0xe85ff25, 0x408ef57,
0x1814cfa4, 0x121b41b, 0xd248a0f, 0x3b05222, 0x39bb16a, 0xc75966d, 0xa038113, 0xa4a1769, 0x11fbc6c,
0x917e50e, 0xeec3da8, 0x169d6eac, 0x10c1699, 0xa416153, 0xf724912, 0x15cd60b7, 0x4acbad9, 0x5efc5fa,
0xf150ed7, 0x122b51, 0x1104b40a, 0xcb7f442, 0xfbb28ff, 0x6ac53ca, 0x196142cc, 0x7bf0fa9, 0x957651,
0x4e0f215, 0xed439f8, 0x3f46bd5, 0x5ace82f, 0x110916b6, 0x6db078, 0xffd7d57, 0xf2ecaac, 0xca86dec,
0x15d6b2da, 0x965ecc9, 0x1c92b4c2, 0x1f3811, 0x1cb080f5, 0x2d8b804, 0x19d1c12d, 0xf20bd46, 0x1951fa7,
0xa3656c3, 0x523a425, 0xfcd0692, 0xd44ddc8, 0x131f0f5b, 0xaf80e4a, 0xcd9fc74, 0x99bb618, 0x2db944c,
0xa673090, 0x1c210e1, 0x178c8d23, 0x1474383, 0x10b8743d, 0x985a55b, 0x2e74779, 0x576138, 0x9587927,
0x133130fa, 0xbe05516, 0x9f4d619, 0xbb62570, 0x99ec591, 0xd9468fe, 0x1d07782d, 0xfc72e0b, 0x701b298,
0x1863863b, 0x85954b8, 0x121a0c36, 0x9e7fedf, 0xf64b429, 0x9b9d71e, 0x14e2f5d8, 0xf858d3a, 0x942eea8,
0xda5b765, 0x6edafff, 0xa9d18cc, 0xc65e4ba, 0x1c747e86, 0xe4ea915, 0x1981d7a1, 0x8395659, 0x52ed4e2,
0x87d43b7, 0x37ab11b, 0x19d292ce, 0xf8d4692, 0x18c3053f, 0x8863e13, 0x4c146c0, 0x6bdf55a, 0x4e4457d,
0x16152289, 0xac78ec2, 0x1a59c5a2, 0x2028b97, 0x71c2d01, 0x295851f, 0x404747b, 0x878558d, 0x7d29aa4,
0x13d8341f, 0x8daefd7, 0x139c972d, 0x6b7ea75, 0xd4a9dde, 0xff163d8, 0x81d55d7, 0xa5bef68, 0xb7b30d8,
0xbe73d6f, 0xaa88141, 0xd976c81, 0x7e7a9cc, 0x18beb771, 0xd773cbd, 0x13f51951, 0x9d0c177, 0x1c49a78,
}
// Group operations:
//
// Elements of the elliptic curve group are represented in Jacobian
// coordinates: (x, y, z). An affine point (x', y') corresponds to
// x' = x/z**2, y' = y/z**3 in Jacobian form.
// p256PointDouble sets {xOut,yOut,zOut} = 2*{x,y,z}.
//
// See https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
func p256PointDouble(xOut, yOut, zOut, x, y, z *[p256Limbs]uint32) {
var delta, gamma, alpha, beta, tmp, tmp2 [p256Limbs]uint32
p256Square(&delta, z)
p256Square(&gamma, y)
p256Mul(&beta, x, &gamma)
p256Sum(&tmp, x, &delta)
p256Diff(&tmp2, x, &delta)
p256Mul(&alpha, &tmp, &tmp2)
p256Scalar3(&alpha)
p256Sum(&tmp, y, z)
p256Square(&tmp, &tmp)
p256Diff(&tmp, &tmp, &gamma)
p256Diff(zOut, &tmp, &delta)
p256Scalar4(&beta)
p256Square(xOut, &alpha)
p256Diff(xOut, xOut, &beta)
p256Diff(xOut, xOut, &beta)
p256Diff(&tmp, &beta, xOut)
p256Mul(&tmp, &alpha, &tmp)
p256Square(&tmp2, &gamma)
p256Scalar8(&tmp2)
p256Diff(yOut, &tmp, &tmp2)
}
// p256PointAddMixed sets {xOut,yOut,zOut} = {x1,y1,z1} + {x2,y2,1}.
// (i.e. the second point is affine.)
//
// See https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
//
// Note that this function does not handle P+P, infinity+P nor P+infinity
// correctly.
func p256PointAddMixed(xOut, yOut, zOut, x1, y1, z1, x2, y2 *[p256Limbs]uint32) {
var z1z1, z1z1z1, s2, u2, h, i, j, r, rr, v, tmp [p256Limbs]uint32
p256Square(&z1z1, z1)
p256Sum(&tmp, z1, z1)
p256Mul(&u2, x2, &z1z1)
p256Mul(&z1z1z1, z1, &z1z1)
p256Mul(&s2, y2, &z1z1z1)
p256Diff(&h, &u2, x1)
p256Sum(&i, &h, &h)
p256Square(&i, &i)
p256Mul(&j, &h, &i)
p256Diff(&r, &s2, y1)
p256Sum(&r, &r, &r)
p256Mul(&v, x1, &i)
p256Mul(zOut, &tmp, &h)
p256Square(&rr, &r)
p256Diff(xOut, &rr, &j)
p256Diff(xOut, xOut, &v)
p256Diff(xOut, xOut, &v)
p256Diff(&tmp, &v, xOut)
p256Mul(yOut, &tmp, &r)
p256Mul(&tmp, y1, &j)
p256Diff(yOut, yOut, &tmp)
p256Diff(yOut, yOut, &tmp)
}
// p256PointAdd sets {xOut,yOut,zOut} = {x1,y1,z1} + {x2,y2,z2}.
//
// See https://www.hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
//
// Note that this function does not handle P+P, infinity+P nor P+infinity
// correctly.
func p256PointAdd(xOut, yOut, zOut, x1, y1, z1, x2, y2, z2 *[p256Limbs]uint32) {
var z1z1, z1z1z1, z2z2, z2z2z2, s1, s2, u1, u2, h, i, j, r, rr, v, tmp [p256Limbs]uint32
p256Square(&z1z1, z1)
p256Square(&z2z2, z2)
p256Mul(&u1, x1, &z2z2)
p256Sum(&tmp, z1, z2)
p256Square(&tmp, &tmp)
p256Diff(&tmp, &tmp, &z1z1)
p256Diff(&tmp, &tmp, &z2z2)
p256Mul(&z2z2z2, z2, &z2z2)
p256Mul(&s1, y1, &z2z2z2)
p256Mul(&u2, x2, &z1z1)
p256Mul(&z1z1z1, z1, &z1z1)
p256Mul(&s2, y2, &z1z1z1)
p256Diff(&h, &u2, &u1)
p256Sum(&i, &h, &h)
p256Square(&i, &i)
p256Mul(&j, &h, &i)
p256Diff(&r, &s2, &s1)
p256Sum(&r, &r, &r)
p256Mul(&v, &u1, &i)
p256Mul(zOut, &tmp, &h)
p256Square(&rr, &r)
p256Diff(xOut, &rr, &j)
p256Diff(xOut, xOut, &v)
p256Diff(xOut, xOut, &v)
p256Diff(&tmp, &v, xOut)
p256Mul(yOut, &tmp, &r)
p256Mul(&tmp, &s1, &j)
p256Diff(yOut, yOut, &tmp)
p256Diff(yOut, yOut, &tmp)
}
// p256SelectAffinePoint sets {out_x,out_y} to the index'th entry of table.
//
// On entry: index < 16, table[0] must be zero.
func p256SelectAffinePoint(xOut, yOut *[p256Limbs]uint32, table []uint32, index uint32) {
for i := range xOut {
xOut[i] = 0
}
for i := range yOut {
yOut[i] = 0
}
for i := uint32(1); i < 16; i++ {
mask := i ^ index
mask |= mask >> 2
mask |= mask >> 1
mask &= 1
mask--
for j := range xOut {
xOut[j] |= table[0] & mask
table = table[1:]
}
for j := range yOut {
yOut[j] |= table[0] & mask
table = table[1:]
}
}
}
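// The mask computation in the selection loop above is worth spelling out.
// exampleSelectMask is an illustrative sketch, not part of the original file
// (the name is hypothetical): for 4-bit table indices, i ^ index folds down to
// a single bit that is zero only when i == index, so the final decrement turns
// it into an all-ones mask exactly for the matching entry.
func exampleSelectMask(i, index uint32) uint32 {
	mask := i ^ index // zero only when i == index
	mask |= mask >> 2 // fold bits 2..3 into bits 0..1
	mask |= mask >> 1 // fold bit 1 into bit 0
	mask &= 1         // 0 if i == index, 1 otherwise
	mask--            // 0xffffffff if i == index, 0 otherwise
	return mask
}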
// p256SelectJacobianPoint sets {out_x,out_y,out_z} to the index'th entry of
// table.
//
// On entry: index < 16, table[0] must be zero.
func p256SelectJacobianPoint(xOut, yOut, zOut *[p256Limbs]uint32, table *[16][3][p256Limbs]uint32, index uint32) {
for i := range xOut {
xOut[i] = 0
}
for i := range yOut {
yOut[i] = 0
}
for i := range zOut {
zOut[i] = 0
}
// The implicit value at index 0 is all zero. We don't need to perform that
// iteration of the loop because we already set out_* to zero.
for i := uint32(1); i < 16; i++ {
mask := i ^ index
mask |= mask >> 2
mask |= mask >> 1
mask &= 1
mask--
for j := range xOut {
xOut[j] |= table[i][0][j] & mask
}
for j := range yOut {
yOut[j] |= table[i][1][j] & mask
}
for j := range zOut {
zOut[j] |= table[i][2][j] & mask
}
}
}
// p256GetBit returns the bit'th bit of scalar.
func p256GetBit(scalar *[32]uint8, bit uint) uint32 {
return uint32(((scalar[bit>>3]) >> (bit & 7)) & 1)
}
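// As a small illustration of the bit addressing used by the scalar
// multiplication below (a hypothetical sketch, not part of the original file):
// scalar is little-endian, so bit 0 is the lowest bit of scalar[0] and bit 9
// is bit 1 of scalar[1].
func exampleGetBit() (bit0, bit9 uint32) {
	var scalar [32]uint8
	scalar[0] = 0x01 // sets bit 0
	scalar[1] = 0x02 // sets bit 9
	return p256GetBit(&scalar, 0), p256GetBit(&scalar, 9) // both return 1
}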
// p256ScalarBaseMult sets {xOut,yOut,zOut} = scalar*G where scalar is a
// little-endian number. Note that the value of scalar must be less than the
// order of the group.
func p256ScalarBaseMult(xOut, yOut, zOut *[p256Limbs]uint32, scalar *[32]uint8) {
nIsInfinityMask := ^uint32(0)
var pIsNoninfiniteMask, mask, tableOffset uint32
var px, py, tx, ty, tz [p256Limbs]uint32
for i := range xOut {
xOut[i] = 0
}
for i := range yOut {
yOut[i] = 0
}
for i := range zOut {
zOut[i] = 0
}
// The loop adds bits at positions 0, 64, 128 and 192, followed by
// positions 32,96,160 and 224 and does this 32 times.
for i := uint(0); i < 32; i++ {
if i != 0 {
p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
}
tableOffset = 0
for j := uint(0); j <= 32; j += 32 {
bit0 := p256GetBit(scalar, 31-i+j)
bit1 := p256GetBit(scalar, 95-i+j)
bit2 := p256GetBit(scalar, 159-i+j)
bit3 := p256GetBit(scalar, 223-i+j)
index := bit0 | (bit1 << 1) | (bit2 << 2) | (bit3 << 3)
p256SelectAffinePoint(&px, &py, p256Precomputed[tableOffset:], index)
tableOffset += 30 * p256Limbs
// Since scalar is less than the order of the group, we know that
// {xOut,yOut,zOut} != {px,py,1}, unless both are zero, which we handle
// below.
p256PointAddMixed(&tx, &ty, &tz, xOut, yOut, zOut, &px, &py)
// The result of pointAddMixed is incorrect if {xOut,yOut,zOut} is zero
// (a.k.a. the point at infinity). We handle that situation by
// copying the point from the table.
p256CopyConditional(xOut, &px, nIsInfinityMask)
p256CopyConditional(yOut, &py, nIsInfinityMask)
p256CopyConditional(zOut, &p256One, nIsInfinityMask)
// Equally, the result is also wrong if the point from the table is
// zero, which happens when the index is zero. We handle that by
// only copying from {tx,ty,tz} to {xOut,yOut,zOut} if index != 0.
pIsNoninfiniteMask = nonZeroToAllOnes(index)
mask = pIsNoninfiniteMask & ^nIsInfinityMask
p256CopyConditional(xOut, &tx, mask)
p256CopyConditional(yOut, &ty, mask)
p256CopyConditional(zOut, &tz, mask)
// If p was not zero, then n is now non-zero.
nIsInfinityMask &^= pIsNoninfiniteMask
}
}
}
// p256PointToAffine converts a Jacobian point to an affine point. If the input
// is the point at infinity then it returns (0, 0) in constant time.
func p256PointToAffine(xOut, yOut, x, y, z *[p256Limbs]uint32) {
var zInv, zInvSq [p256Limbs]uint32
p256Invert(&zInv, z)
p256Square(&zInvSq, &zInv)
p256Mul(xOut, x, &zInvSq)
p256Mul(&zInv, &zInv, &zInvSq)
p256Mul(yOut, y, &zInv)
}
// p256ToAffine returns a pair of *big.Int containing the affine representation
// of {x,y,z}.
func p256ToAffine(x, y, z *[p256Limbs]uint32) (xOut, yOut *big.Int) {
var xx, yy [p256Limbs]uint32
p256PointToAffine(&xx, &yy, x, y, z)
return p256ToBig(&xx), p256ToBig(&yy)
}
// p256ScalarMult sets {xOut,yOut,zOut} = scalar*{x,y}.
func p256ScalarMult(xOut, yOut, zOut, x, y *[p256Limbs]uint32, scalar *[32]uint8) {
var px, py, pz, tx, ty, tz [p256Limbs]uint32
var precomp [16][3][p256Limbs]uint32
var nIsInfinityMask, index, pIsNoninfiniteMask, mask uint32
// We precompute 0,1,2,... times {x,y}.
precomp[1][0] = *x
precomp[1][1] = *y
precomp[1][2] = p256One
for i := 2; i < 16; i += 2 {
p256PointDouble(&precomp[i][0], &precomp[i][1], &precomp[i][2], &precomp[i/2][0], &precomp[i/2][1], &precomp[i/2][2])
p256PointAddMixed(&precomp[i+1][0], &precomp[i+1][1], &precomp[i+1][2], &precomp[i][0], &precomp[i][1], &precomp[i][2], x, y)
}
for i := range xOut {
xOut[i] = 0
}
for i := range yOut {
yOut[i] = 0
}
for i := range zOut {
zOut[i] = 0
}
nIsInfinityMask = ^uint32(0)
// We add in a window of four bits each iteration and do this 64 times.
for i := 0; i < 64; i++ {
if i != 0 {
p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
p256PointDouble(xOut, yOut, zOut, xOut, yOut, zOut)
}
index = uint32(scalar[31-i/2])
if (i & 1) == 1 {
index &= 15
} else {
index >>= 4
}
// See the comments in scalarBaseMult about handling infinities.
p256SelectJacobianPoint(&px, &py, &pz, &precomp, index)
p256PointAdd(&tx, &ty, &tz, xOut, yOut, zOut, &px, &py, &pz)
p256CopyConditional(xOut, &px, nIsInfinityMask)
p256CopyConditional(yOut, &py, nIsInfinityMask)
p256CopyConditional(zOut, &pz, nIsInfinityMask)
pIsNoninfiniteMask = nonZeroToAllOnes(index)
mask = pIsNoninfiniteMask & ^nIsInfinityMask
p256CopyConditional(xOut, &tx, mask)
p256CopyConditional(yOut, &ty, mask)
p256CopyConditional(zOut, &tz, mask)
nIsInfinityMask &^= pIsNoninfiniteMask
}
}

View file

@ -0,0 +1,705 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 && !arm64
package elliptic
import "math/big"
// Field elements are represented as nine, unsigned 32-bit words.
//
// The value of a field element is:
// x[0] + (x[1] * 2**29) + (x[2] * 2**57) + ... + (x[8] * 2**228)
//
// That is, each limb is alternately 29 or 28 bits wide, in little-endian
// order.
//
// This means that a field element hits 2**257, rather than 2**256 as we would
// like. A 28, 29, ... pattern would cause us to hit 2**256, but that causes
// problems when multiplying as terms end up one bit short of a limb which
// would require much bit-shifting to correct.
//
// Finally, the values stored in a field element are in Montgomery form. So the
// value |y| is stored as (y*R) mod p, where p is the P-256 prime and R is
// 2**257.
const (
p256Limbs = 9
bottom29Bits = 0x1fffffff
)
var (
// p256One is the number 1 as a field element.
p256One = [p256Limbs]uint32{2, 0, 0, 0xffff800, 0x1fffffff, 0xfffffff, 0x1fbfffff, 0x1ffffff, 0}
p256Zero = [p256Limbs]uint32{0, 0, 0, 0, 0, 0, 0, 0, 0}
// p256P is the prime modulus as a field element.
p256P = [p256Limbs]uint32{0x1fffffff, 0xfffffff, 0x1fffffff, 0x3ff, 0, 0, 0x200000, 0xf000000, 0xfffffff}
// p2562P is twice the prime modulus as a field element.
p2562P = [p256Limbs]uint32{0x1ffffffe, 0xfffffff, 0x1fffffff, 0x7ff, 0, 0, 0x400000, 0xe000000, 0x1fffffff}
)
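// The alternating 29/28-bit limb layout described above gives each limb a
// fixed starting bit position. exampleLimbPositions is an illustrative sketch
// (the function is hypothetical, not part of the original file) that computes
// those offsets: 0, 29, 57, 86, 114, 143, 171, 200 and 228, matching the
// table in p256ReduceDegree below.
func exampleLimbPositions() [p256Limbs]uint {
	var pos [p256Limbs]uint
	bit := uint(0)
	for i := 0; i < p256Limbs; i++ {
		pos[i] = bit
		if i%2 == 0 {
			bit += 29 // even limbs are 29 bits wide
		} else {
			bit += 28 // odd limbs are 28 bits wide
		}
	}
	return pos
}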
// Field element operations:
const bottom28Bits = 0xfffffff
// nonZeroToAllOnes returns:
//
// 0xffffffff for 0 < x <= 2**31
// 0 for x == 0 or x > 2**31.
func nonZeroToAllOnes(x uint32) uint32 {
return ((x - 1) >> 31) - 1
}
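// A quick illustration of that contract (a hypothetical sketch, not part of
// the original file): zero maps to zero, while any value in (0, 2**31] maps to
// an all-ones mask. The constant-time copy and table-selection routines below
// rely on exactly this behavior.
func exampleNonZeroToAllOnes() (zeroMask, oneMask, bigMask uint32) {
	zeroMask = nonZeroToAllOnes(0)      // 0x00000000
	oneMask = nonZeroToAllOnes(1)       // 0xffffffff
	bigMask = nonZeroToAllOnes(1 << 30) // 0xffffffff
	return
}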
// p256ReduceCarry adds a multiple of p in order to cancel |carry|,
// which is a term at 2**257.
//
// On entry: carry < 2**3, inout[0,2,...] < 2**29, inout[1,3,...] < 2**28.
// On exit: inout[0,2,..] < 2**30, inout[1,3,...] < 2**29.
func p256ReduceCarry(inout *[p256Limbs]uint32, carry uint32) {
carry_mask := nonZeroToAllOnes(carry)
inout[0] += carry << 1
inout[3] += 0x10000000 & carry_mask
// carry < 2**3 thus (carry << 11) < 2**14 and we added 2**28 in the
// previous line therefore this doesn't underflow.
inout[3] -= carry << 11
inout[4] += (0x20000000 - 1) & carry_mask
inout[5] += (0x10000000 - 1) & carry_mask
inout[6] += (0x20000000 - 1) & carry_mask
inout[6] -= carry << 22
// This may underflow if carry is non-zero but, if so, we'll fix it in the
// next line.
inout[7] -= 1 & carry_mask
inout[7] += carry << 25
}
// p256Sum sets out = in+in2.
//
// On entry: in[i]+in2[i] must not overflow a 32-bit word.
// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
func p256Sum(out, in, in2 *[p256Limbs]uint32) {
carry := uint32(0)
for i := 0; ; i++ {
out[i] = in[i] + in2[i]
out[i] += carry
carry = out[i] >> 29
out[i] &= bottom29Bits
i++
if i == p256Limbs {
break
}
out[i] = in[i] + in2[i]
out[i] += carry
carry = out[i] >> 28
out[i] &= bottom28Bits
}
p256ReduceCarry(out, carry)
}
const (
two30m2 = 1<<30 - 1<<2
two30p13m2 = 1<<30 + 1<<13 - 1<<2
two31m2 = 1<<31 - 1<<2
two31m3 = 1<<31 - 1<<3
two31p24m2 = 1<<31 + 1<<24 - 1<<2
two30m27m2 = 1<<30 - 1<<27 - 1<<2
)
// p256Zero31 is 0 mod p.
var p256Zero31 = [p256Limbs]uint32{two31m3, two30m2, two31m2, two30p13m2, two31m2, two30m2, two31p24m2, two30m27m2, two31m2}
// p256Diff sets out = in-in2.
//
// On entry: in[0,2,...] < 2**30, in[1,3,...] < 2**29 and
// in2[0,2,...] < 2**30, in2[1,3,...] < 2**29.
// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
func p256Diff(out, in, in2 *[p256Limbs]uint32) {
var carry uint32
for i := 0; ; i++ {
out[i] = in[i] - in2[i]
out[i] += p256Zero31[i]
out[i] += carry
carry = out[i] >> 29
out[i] &= bottom29Bits
i++
if i == p256Limbs {
break
}
out[i] = in[i] - in2[i]
out[i] += p256Zero31[i]
out[i] += carry
carry = out[i] >> 28
out[i] &= bottom28Bits
}
p256ReduceCarry(out, carry)
}
// p256ReduceDegree sets out = tmp/R mod p where tmp contains 64-bit words with
// the same 29,28,... bit positions as a field element.
//
// The values in field elements are in Montgomery form: x*R mod p where R =
// 2**257. Since we just multiplied two Montgomery values together, the result
// is x*y*R*R mod p. We wish to divide by R in order for the result also to be
// in Montgomery form.
//
// On entry: tmp[i] < 2**64.
// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
func p256ReduceDegree(out *[p256Limbs]uint32, tmp [17]uint64) {
// The following table may be helpful when reading this code:
//
// Limb number: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10...
// Width (bits): 29| 28| 29| 28| 29| 28| 29| 28| 29| 28| 29
// Start bit: 0 | 29| 57| 86|114|143|171|200|228|257|285
// (odd phase): 0 | 28| 57| 85|114|142|171|199|228|256|285
var tmp2 [18]uint32
var carry, x, xMask uint32
// tmp contains 64-bit words with the same 29,28,29-bit positions as a
// field element. So the top of an element of tmp might overlap with
// another element two positions down. The following loop eliminates
// this overlap.
tmp2[0] = uint32(tmp[0]) & bottom29Bits
tmp2[1] = uint32(tmp[0]) >> 29
tmp2[1] |= (uint32(tmp[0]>>32) << 3) & bottom28Bits
tmp2[1] += uint32(tmp[1]) & bottom28Bits
carry = tmp2[1] >> 28
tmp2[1] &= bottom28Bits
for i := 2; i < 17; i++ {
tmp2[i] = (uint32(tmp[i-2] >> 32)) >> 25
tmp2[i] += (uint32(tmp[i-1])) >> 28
tmp2[i] += (uint32(tmp[i-1]>>32) << 4) & bottom29Bits
tmp2[i] += uint32(tmp[i]) & bottom29Bits
tmp2[i] += carry
carry = tmp2[i] >> 29
tmp2[i] &= bottom29Bits
i++
if i == 17 {
break
}
tmp2[i] = uint32(tmp[i-2]>>32) >> 25
tmp2[i] += uint32(tmp[i-1]) >> 29
tmp2[i] += ((uint32(tmp[i-1] >> 32)) << 3) & bottom28Bits
tmp2[i] += uint32(tmp[i]) & bottom28Bits
tmp2[i] += carry
carry = tmp2[i] >> 28
tmp2[i] &= bottom28Bits
}
tmp2[17] = uint32(tmp[15]>>32) >> 25
tmp2[17] += uint32(tmp[16]) >> 29
tmp2[17] += uint32(tmp[16]>>32) << 3
tmp2[17] += carry
// Montgomery elimination of terms:
//
// Since R is 2**257, we can divide by R with a bitwise shift if we can
// ensure that the right-most 257 bits are all zero. We can make that true
// by adding multiplies of p without affecting the value.
//
// So we eliminate limbs from right to left. Since the bottom 29 bits of p
// are all ones, then by adding tmp2[0]*p to tmp2 we'll make tmp2[0] == 0.
// We can do that for 8 further limbs and then right shift to eliminate the
// extra factor of R.
for i := 0; ; i += 2 {
tmp2[i+1] += tmp2[i] >> 29
x = tmp2[i] & bottom29Bits
xMask = nonZeroToAllOnes(x)
tmp2[i] = 0
// The bounds calculations for this loop are tricky. Each iteration of
// the loop eliminates two words by adding values to words to their
// right.
//
// The following table contains the amounts added to each word (as an
// offset from the value of i at the top of the loop). The amounts are
// accounted for from the first and second half of the loop separately
// and are written as, for example, 28 to mean a value <2**28.
//
// Word: 3 4 5 6 7 8 9 10
// Added in top half: 28 11 29 21 29 28
// 28 29
// 29
// Added in bottom half: 29 10 28 21 28 28
// 29
//
// The value that is currently offset 7 will be offset 5 for the next
// iteration and then offset 3 for the iteration after that. Therefore
// the total value added will be the values added at 7, 5 and 3.
//
// The following table accumulates these values. The sums at the bottom
// are written as, for example, 29+28, to mean a value < 2**29+2**28.
//
// Word: 3 4 5 6 7 8 9 10 11 12 13
// 28 11 10 29 21 29 28 28 28 28 28
// 29 28 11 28 29 28 29 28 29 28
// 29 28 21 21 29 21 29 21
// 10 29 28 21 28 21 28
// 28 29 28 29 28 29 28
// 11 10 29 10 29 10
// 29 28 11 28 11
// 29 29
// --------------------------------------------
// 30+ 31+ 30+ 31+ 30+
// 28+ 29+ 28+ 29+ 21+
// 21+ 28+ 21+ 28+ 10
// 10 21+ 10 21+
// 11 11
//
// So the greatest amount is added to tmp2[10] and tmp2[12]. If
// tmp2[10/12] has an initial value of <2**29, then the maximum value
// will be < 2**31 + 2**30 + 2**28 + 2**21 + 2**11, which is < 2**32,
// as required.
tmp2[i+3] += (x << 10) & bottom28Bits
tmp2[i+4] += (x >> 18)
tmp2[i+6] += (x << 21) & bottom29Bits
tmp2[i+7] += x >> 8
// At position 200, which is the starting bit position for word 7, we
// have a factor of 0xf000000 = 2**28 - 2**24.
tmp2[i+7] += 0x10000000 & xMask
tmp2[i+8] += (x - 1) & xMask
tmp2[i+7] -= (x << 24) & bottom28Bits
tmp2[i+8] -= x >> 4
tmp2[i+8] += 0x20000000 & xMask
tmp2[i+8] -= x
tmp2[i+8] += (x << 28) & bottom29Bits
tmp2[i+9] += ((x >> 1) - 1) & xMask
if i+1 == p256Limbs {
break
}
tmp2[i+2] += tmp2[i+1] >> 28
x = tmp2[i+1] & bottom28Bits
xMask = nonZeroToAllOnes(x)
tmp2[i+1] = 0
tmp2[i+4] += (x << 11) & bottom29Bits
tmp2[i+5] += (x >> 18)
tmp2[i+7] += (x << 21) & bottom28Bits
tmp2[i+8] += x >> 7
// At position 199, which is the starting bit of the 8th word when
// dealing with a context starting on an odd word, we have a factor of
// 0x1e000000 = 2**29 - 2**25. Since we have not updated i, the 8th
// word from i+1 is i+8.
tmp2[i+8] += 0x20000000 & xMask
tmp2[i+9] += (x - 1) & xMask
tmp2[i+8] -= (x << 25) & bottom29Bits
tmp2[i+9] -= x >> 4
tmp2[i+9] += 0x10000000 & xMask
tmp2[i+9] -= x
tmp2[i+10] += (x - 1) & xMask
}
// We merge the right shift with a carry chain. The words above 2**257 have
// widths of 28,29,... which we need to correct when copying them down.
carry = 0
for i := 0; i < 8; i++ {
// The maximum value of tmp2[i + 9] occurs on the first iteration and
// is < 2**30+2**29+2**28. Adding 2**29 (from tmp2[i + 10]) is
// therefore safe.
out[i] = tmp2[i+9]
out[i] += carry
out[i] += (tmp2[i+10] << 28) & bottom29Bits
carry = out[i] >> 29
out[i] &= bottom29Bits
i++
out[i] = tmp2[i+9] >> 1
out[i] += carry
carry = out[i] >> 28
out[i] &= bottom28Bits
}
out[8] = tmp2[17]
out[8] += carry
carry = out[8] >> 29
out[8] &= bottom29Bits
p256ReduceCarry(out, carry)
}
// p256Square sets out=in*in.
//
// On entry: in[0,2,...] < 2**30, in[1,3,...] < 2**29.
// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
func p256Square(out, in *[p256Limbs]uint32) {
var tmp [17]uint64
tmp[0] = uint64(in[0]) * uint64(in[0])
tmp[1] = uint64(in[0]) * (uint64(in[1]) << 1)
tmp[2] = uint64(in[0])*(uint64(in[2])<<1) +
uint64(in[1])*(uint64(in[1])<<1)
tmp[3] = uint64(in[0])*(uint64(in[3])<<1) +
uint64(in[1])*(uint64(in[2])<<1)
tmp[4] = uint64(in[0])*(uint64(in[4])<<1) +
uint64(in[1])*(uint64(in[3])<<2) +
uint64(in[2])*uint64(in[2])
tmp[5] = uint64(in[0])*(uint64(in[5])<<1) +
uint64(in[1])*(uint64(in[4])<<1) +
uint64(in[2])*(uint64(in[3])<<1)
tmp[6] = uint64(in[0])*(uint64(in[6])<<1) +
uint64(in[1])*(uint64(in[5])<<2) +
uint64(in[2])*(uint64(in[4])<<1) +
uint64(in[3])*(uint64(in[3])<<1)
tmp[7] = uint64(in[0])*(uint64(in[7])<<1) +
uint64(in[1])*(uint64(in[6])<<1) +
uint64(in[2])*(uint64(in[5])<<1) +
uint64(in[3])*(uint64(in[4])<<1)
// tmp[8] has the greatest value of 2**61 + 2**60 + 2**61 + 2**60 + 2**60,
// which is < 2**64 as required.
tmp[8] = uint64(in[0])*(uint64(in[8])<<1) +
uint64(in[1])*(uint64(in[7])<<2) +
uint64(in[2])*(uint64(in[6])<<1) +
uint64(in[3])*(uint64(in[5])<<2) +
uint64(in[4])*uint64(in[4])
tmp[9] = uint64(in[1])*(uint64(in[8])<<1) +
uint64(in[2])*(uint64(in[7])<<1) +
uint64(in[3])*(uint64(in[6])<<1) +
uint64(in[4])*(uint64(in[5])<<1)
tmp[10] = uint64(in[2])*(uint64(in[8])<<1) +
uint64(in[3])*(uint64(in[7])<<2) +
uint64(in[4])*(uint64(in[6])<<1) +
uint64(in[5])*(uint64(in[5])<<1)
tmp[11] = uint64(in[3])*(uint64(in[8])<<1) +
uint64(in[4])*(uint64(in[7])<<1) +
uint64(in[5])*(uint64(in[6])<<1)
tmp[12] = uint64(in[4])*(uint64(in[8])<<1) +
uint64(in[5])*(uint64(in[7])<<2) +
uint64(in[6])*uint64(in[6])
tmp[13] = uint64(in[5])*(uint64(in[8])<<1) +
uint64(in[6])*(uint64(in[7])<<1)
tmp[14] = uint64(in[6])*(uint64(in[8])<<1) +
uint64(in[7])*(uint64(in[7])<<1)
tmp[15] = uint64(in[7]) * (uint64(in[8]) << 1)
tmp[16] = uint64(in[8]) * uint64(in[8])
p256ReduceDegree(out, tmp)
}
// p256Mul sets out=in*in2.
//
// On entry: in[0,2,...] < 2**30, in[1,3,...] < 2**29 and
// in2[0,2,...] < 2**30, in2[1,3,...] < 2**29.
// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
func p256Mul(out, in, in2 *[p256Limbs]uint32) {
var tmp [17]uint64
tmp[0] = uint64(in[0]) * uint64(in2[0])
tmp[1] = uint64(in[0])*(uint64(in2[1])<<0) +
uint64(in[1])*(uint64(in2[0])<<0)
tmp[2] = uint64(in[0])*(uint64(in2[2])<<0) +
uint64(in[1])*(uint64(in2[1])<<1) +
uint64(in[2])*(uint64(in2[0])<<0)
tmp[3] = uint64(in[0])*(uint64(in2[3])<<0) +
uint64(in[1])*(uint64(in2[2])<<0) +
uint64(in[2])*(uint64(in2[1])<<0) +
uint64(in[3])*(uint64(in2[0])<<0)
tmp[4] = uint64(in[0])*(uint64(in2[4])<<0) +
uint64(in[1])*(uint64(in2[3])<<1) +
uint64(in[2])*(uint64(in2[2])<<0) +
uint64(in[3])*(uint64(in2[1])<<1) +
uint64(in[4])*(uint64(in2[0])<<0)
tmp[5] = uint64(in[0])*(uint64(in2[5])<<0) +
uint64(in[1])*(uint64(in2[4])<<0) +
uint64(in[2])*(uint64(in2[3])<<0) +
uint64(in[3])*(uint64(in2[2])<<0) +
uint64(in[4])*(uint64(in2[1])<<0) +
uint64(in[5])*(uint64(in2[0])<<0)
tmp[6] = uint64(in[0])*(uint64(in2[6])<<0) +
uint64(in[1])*(uint64(in2[5])<<1) +
uint64(in[2])*(uint64(in2[4])<<0) +
uint64(in[3])*(uint64(in2[3])<<1) +
uint64(in[4])*(uint64(in2[2])<<0) +
uint64(in[5])*(uint64(in2[1])<<1) +
uint64(in[6])*(uint64(in2[0])<<0)
tmp[7] = uint64(in[0])*(uint64(in2[7])<<0) +
uint64(in[1])*(uint64(in2[6])<<0) +
uint64(in[2])*(uint64(in2[5])<<0) +
uint64(in[3])*(uint64(in2[4])<<0) +
uint64(in[4])*(uint64(in2[3])<<0) +
uint64(in[5])*(uint64(in2[2])<<0) +
uint64(in[6])*(uint64(in2[1])<<0) +
uint64(in[7])*(uint64(in2[0])<<0)
// tmp[8] has the greatest value but doesn't overflow. See logic in
// p256Square.
tmp[8] = uint64(in[0])*(uint64(in2[8])<<0) +
uint64(in[1])*(uint64(in2[7])<<1) +
uint64(in[2])*(uint64(in2[6])<<0) +
uint64(in[3])*(uint64(in2[5])<<1) +
uint64(in[4])*(uint64(in2[4])<<0) +
uint64(in[5])*(uint64(in2[3])<<1) +
uint64(in[6])*(uint64(in2[2])<<0) +
uint64(in[7])*(uint64(in2[1])<<1) +
uint64(in[8])*(uint64(in2[0])<<0)
tmp[9] = uint64(in[1])*(uint64(in2[8])<<0) +
uint64(in[2])*(uint64(in2[7])<<0) +
uint64(in[3])*(uint64(in2[6])<<0) +
uint64(in[4])*(uint64(in2[5])<<0) +
uint64(in[5])*(uint64(in2[4])<<0) +
uint64(in[6])*(uint64(in2[3])<<0) +
uint64(in[7])*(uint64(in2[2])<<0) +
uint64(in[8])*(uint64(in2[1])<<0)
tmp[10] = uint64(in[2])*(uint64(in2[8])<<0) +
uint64(in[3])*(uint64(in2[7])<<1) +
uint64(in[4])*(uint64(in2[6])<<0) +
uint64(in[5])*(uint64(in2[5])<<1) +
uint64(in[6])*(uint64(in2[4])<<0) +
uint64(in[7])*(uint64(in2[3])<<1) +
uint64(in[8])*(uint64(in2[2])<<0)
tmp[11] = uint64(in[3])*(uint64(in2[8])<<0) +
uint64(in[4])*(uint64(in2[7])<<0) +
uint64(in[5])*(uint64(in2[6])<<0) +
uint64(in[6])*(uint64(in2[5])<<0) +
uint64(in[7])*(uint64(in2[4])<<0) +
uint64(in[8])*(uint64(in2[3])<<0)
tmp[12] = uint64(in[4])*(uint64(in2[8])<<0) +
uint64(in[5])*(uint64(in2[7])<<1) +
uint64(in[6])*(uint64(in2[6])<<0) +
uint64(in[7])*(uint64(in2[5])<<1) +
uint64(in[8])*(uint64(in2[4])<<0)
tmp[13] = uint64(in[5])*(uint64(in2[8])<<0) +
uint64(in[6])*(uint64(in2[7])<<0) +
uint64(in[7])*(uint64(in2[6])<<0) +
uint64(in[8])*(uint64(in2[5])<<0)
tmp[14] = uint64(in[6])*(uint64(in2[8])<<0) +
uint64(in[7])*(uint64(in2[7])<<1) +
uint64(in[8])*(uint64(in2[6])<<0)
tmp[15] = uint64(in[7])*(uint64(in2[8])<<0) +
uint64(in[8])*(uint64(in2[7])<<0)
tmp[16] = uint64(in[8]) * (uint64(in2[8]) << 0)
p256ReduceDegree(out, tmp)
}
func p256Assign(out, in *[p256Limbs]uint32) {
*out = *in
}
// p256Invert calculates |out| = |in|^{-1}
//
// Based on Fermat's Little Theorem:
//
// a^p = a (mod p)
// a^{p-1} = 1 (mod p)
// a^{p-2} = a^{-1} (mod p)
func p256Invert(out, in *[p256Limbs]uint32) {
var ftmp, ftmp2 [p256Limbs]uint32
// each e_I will hold |in|^{2^I - 1}
var e2, e4, e8, e16, e32, e64 [p256Limbs]uint32
p256Square(&ftmp, in) // 2^1
p256Mul(&ftmp, in, &ftmp) // 2^2 - 2^0
p256Assign(&e2, &ftmp)
p256Square(&ftmp, &ftmp) // 2^3 - 2^1
p256Square(&ftmp, &ftmp) // 2^4 - 2^2
p256Mul(&ftmp, &ftmp, &e2) // 2^4 - 2^0
p256Assign(&e4, &ftmp)
p256Square(&ftmp, &ftmp) // 2^5 - 2^1
p256Square(&ftmp, &ftmp) // 2^6 - 2^2
p256Square(&ftmp, &ftmp) // 2^7 - 2^3
p256Square(&ftmp, &ftmp) // 2^8 - 2^4
p256Mul(&ftmp, &ftmp, &e4) // 2^8 - 2^0
p256Assign(&e8, &ftmp)
for i := 0; i < 8; i++ {
p256Square(&ftmp, &ftmp)
} // 2^16 - 2^8
p256Mul(&ftmp, &ftmp, &e8) // 2^16 - 2^0
p256Assign(&e16, &ftmp)
for i := 0; i < 16; i++ {
p256Square(&ftmp, &ftmp)
} // 2^32 - 2^16
p256Mul(&ftmp, &ftmp, &e16) // 2^32 - 2^0
p256Assign(&e32, &ftmp)
for i := 0; i < 32; i++ {
p256Square(&ftmp, &ftmp)
} // 2^64 - 2^32
p256Assign(&e64, &ftmp)
p256Mul(&ftmp, &ftmp, in) // 2^64 - 2^32 + 2^0
for i := 0; i < 192; i++ {
p256Square(&ftmp, &ftmp)
} // 2^256 - 2^224 + 2^192
p256Mul(&ftmp2, &e64, &e32) // 2^64 - 2^0
for i := 0; i < 16; i++ {
p256Square(&ftmp2, &ftmp2)
} // 2^80 - 2^16
p256Mul(&ftmp2, &ftmp2, &e16) // 2^80 - 2^0
for i := 0; i < 8; i++ {
p256Square(&ftmp2, &ftmp2)
} // 2^88 - 2^8
p256Mul(&ftmp2, &ftmp2, &e8) // 2^88 - 2^0
for i := 0; i < 4; i++ {
p256Square(&ftmp2, &ftmp2)
} // 2^92 - 2^4
p256Mul(&ftmp2, &ftmp2, &e4) // 2^92 - 2^0
p256Square(&ftmp2, &ftmp2) // 2^93 - 2^1
p256Square(&ftmp2, &ftmp2) // 2^94 - 2^2
p256Mul(&ftmp2, &ftmp2, &e2) // 2^94 - 2^0
p256Square(&ftmp2, &ftmp2) // 2^95 - 2^1
p256Square(&ftmp2, &ftmp2) // 2^96 - 2^2
p256Mul(&ftmp2, &ftmp2, in) // 2^96 - 3
p256Mul(out, &ftmp2, &ftmp) // 2^256 - 2^224 + 2^192 + 2^96 - 3
}
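// The addition chain above is just a constant-time way of raising in to the
// power p-2. As an illustrative cross-check (hypothetical, not part of the
// original file, and not constant time), math/big can express the same
// exponentiation directly on integers in the normal, non-Montgomery domain.
func exampleInvertWithBig(in *big.Int) *big.Int {
	p := p256Params.P
	pMinus2 := new(big.Int).Sub(p, big.NewInt(2))
	return new(big.Int).Exp(in, pMinus2, p) // in^(p-2) mod p == in^-1 mod p, by Fermat's little theorem
}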
// p256Scalar3 sets out=3*out.
//
// On entry: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
func p256Scalar3(out *[p256Limbs]uint32) {
var carry uint32
for i := 0; ; i++ {
out[i] *= 3
out[i] += carry
carry = out[i] >> 29
out[i] &= bottom29Bits
i++
if i == p256Limbs {
break
}
out[i] *= 3
out[i] += carry
carry = out[i] >> 28
out[i] &= bottom28Bits
}
p256ReduceCarry(out, carry)
}
// p256Scalar4 sets out=4*out.
//
// On entry: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
func p256Scalar4(out *[p256Limbs]uint32) {
var carry, nextCarry uint32
for i := 0; ; i++ {
nextCarry = out[i] >> 27
out[i] <<= 2
out[i] &= bottom29Bits
out[i] += carry
carry = nextCarry + (out[i] >> 29)
out[i] &= bottom29Bits
i++
if i == p256Limbs {
break
}
nextCarry = out[i] >> 26
out[i] <<= 2
out[i] &= bottom28Bits
out[i] += carry
carry = nextCarry + (out[i] >> 28)
out[i] &= bottom28Bits
}
p256ReduceCarry(out, carry)
}
// p256Scalar8 sets out=8*out.
//
// On entry: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
// On exit: out[0,2,...] < 2**30, out[1,3,...] < 2**29.
func p256Scalar8(out *[p256Limbs]uint32) {
var carry, nextCarry uint32
for i := 0; ; i++ {
nextCarry = out[i] >> 26
out[i] <<= 3
out[i] &= bottom29Bits
out[i] += carry
carry = nextCarry + (out[i] >> 29)
out[i] &= bottom29Bits
i++
if i == p256Limbs {
break
}
nextCarry = out[i] >> 25
out[i] <<= 3
out[i] &= bottom28Bits
out[i] += carry
carry = nextCarry + (out[i] >> 28)
out[i] &= bottom28Bits
}
p256ReduceCarry(out, carry)
}
// p256CopyConditional sets out=in if mask = 0xffffffff in constant time.
//
// On entry: mask is either 0 or 0xffffffff.
func p256CopyConditional(out, in *[p256Limbs]uint32, mask uint32) {
for i := 0; i < p256Limbs; i++ {
tmp := mask & (in[i] ^ out[i])
out[i] ^= tmp
}
}
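// exampleSelect is an illustrative sketch (hypothetical, not part of the
// original file) of how the mask from nonZeroToAllOnes drives this
// constant-time copy: with choose == 0 dst is left unchanged, with any
// non-zero choose dst becomes a copy of src, without branching on the value.
func exampleSelect(dst, src *[p256Limbs]uint32, choose uint32) {
	p256CopyConditional(dst, src, nonZeroToAllOnes(choose))
}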
// p256FromBig sets out = R*in.
func p256FromBig(out *[p256Limbs]uint32, in *big.Int) {
tmp := new(big.Int).Lsh(in, 257)
tmp.Mod(tmp, p256Params.P)
for i := 0; i < p256Limbs; i++ {
if bits := tmp.Bits(); len(bits) > 0 {
out[i] = uint32(bits[0]) & bottom29Bits
} else {
out[i] = 0
}
tmp.Rsh(tmp, 29)
i++
if i == p256Limbs {
break
}
if bits := tmp.Bits(); len(bits) > 0 {
out[i] = uint32(bits[0]) & bottom28Bits
} else {
out[i] = 0
}
tmp.Rsh(tmp, 28)
}
}
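// p256FromBig and p256ToBig are inverses for canonical inputs. The following
// round trip is an illustrative sketch (hypothetical, not part of the original
// file): for 0 <= x < p it returns a value equal to x, with the Montgomery
// factor R applied on the way in and removed again on the way out.
func exampleRoundTrip(x *big.Int) *big.Int {
	var limbs [p256Limbs]uint32
	p256FromBig(&limbs, x) // limbs now hold x*R mod p
	return p256ToBig(&limbs)
}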
// p256ToBig returns a *big.Int containing the value of in.
func p256ToBig(in *[p256Limbs]uint32) *big.Int {
result, tmp := new(big.Int), new(big.Int)
result.SetInt64(int64(in[p256Limbs-1]))
for i := p256Limbs - 2; i >= 0; i-- {
if (i & 1) == 0 {
result.Lsh(result, 29)
} else {
result.Lsh(result, 28)
}
tmp.SetInt64(int64(in[i]))
result.Add(result, tmp)
}
result.Mul(result, p256RInverse)
result.Mod(result, p256Params.P)
return result
}

View file

@ -0,0 +1,15 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !amd64 && !s390x && !arm64 && !ppc64le
// +build !amd64,!s390x,!arm64,!ppc64le
package elliptic
var p256 p256Curve
func initP256Arch() {
// Use pure Go constant-time implementation.
p256 = p256Curve{p256Params}
}

View file

@ -35,7 +35,6 @@ var (
func initP256Arch() {
p256 = p256CurveFast{p256Params}
initTable()
return
}
func (curve p256CurveFast) Params() *CurveParams {
@ -73,7 +72,6 @@ func p256MovCond(res, a, b *p256Point, cond int)
//go:noescape
func p256Select(point *p256Point, table []p256Point, idx int)
//
//go:noescape
func p256SelectBase(point *p256Point, table []p256Point, idx int)
@ -85,12 +83,9 @@ func p256SelectBase(point *p256Point, table []p256Point, idx int)
//go:noescape
func p256PointAddAffineAsm(res, in1, in2 *p256Point, sign, sel, zero int)
// Point add
//
//go:noescape
func p256PointAddAsm(res, in1, in2 *p256Point) int
//
//go:noescape
func p256PointDoubleAsm(res, in *p256Point)
@ -340,7 +335,6 @@ func boothW7(in uint) (int, int) {
}
func initTable() {
p256PreFast = new([37][64]p256Point)
// TODO: For big endian, these slices should be in reverse byte order,
@ -352,7 +346,6 @@ func initTable() {
0x25, 0xf3, 0x21, 0xdd, 0x88, 0x86, 0xe8, 0xd2, 0x85, 0x5d, 0x88, 0x25, 0x18, 0xff, 0x71, 0x85}, //(p256.y*2^256)%p
z: [32]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}, //(p256.z*2^256)%p
}
t1 := new(p256Point)

View file

@ -60,7 +60,6 @@ func initP256Arch() {
// No vector support, use pure Go implementation.
p256 = p256Curve{p256Params}
return
}
func (curve p256CurveFast) Params() *CurveParams {

View file

@ -1,144 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elliptic
import (
"crypto/elliptic/internal/nistec"
"crypto/rand"
"math/big"
)
// p384Curve is a Curve implementation based on nistec.P384Point.
//
// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
// legacy idiosyncrasies it requires, such as invalid and infinity point
// handling.
//
// To interact with the nistec package, points are encoded into and decoded from
// properly formatted byte slices. All big.Int use is limited to this package.
// Encoding and decoding is 1/1000th of the runtime of a scalar multiplication,
// so the overhead is acceptable.
type p384Curve struct {
params *CurveParams
}
var p384 p384Curve
var _ Curve = p384
func initP384() {
p384.params = &CurveParams{
Name: "P-384",
BitSize: 384,
// FIPS 186-4, section D.1.2.4
P: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
"46667948293404245721771496870329047266088258938001861606973112319"),
N: bigFromDecimal("394020061963944792122790401001436138050797392704654" +
"46667946905279627659399113263569398956308152294913554433653942643"),
B: bigFromHex("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088" +
"f5013875ac656398d8a2ed19d2a85c8edd3ec2aef"),
Gx: bigFromHex("aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741" +
"e082542a385502f25dbf55296c3a545e3872760ab7"),
Gy: bigFromHex("3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da31" +
"13b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f"),
}
}
func (curve p384Curve) Params() *CurveParams {
return curve.params
}
func (curve p384Curve) IsOnCurve(x, y *big.Int) bool {
// IsOnCurve is documented to reject (0, 0), the conventional point at
// infinity, which however is accepted by p384PointFromAffine.
if x.Sign() == 0 && y.Sign() == 0 {
return false
}
_, ok := p384PointFromAffine(x, y)
return ok
}
func p384PointFromAffine(x, y *big.Int) (p *nistec.P384Point, ok bool) {
// (0, 0) is by convention the point at infinity, which can't be represented
// in affine coordinates. Marshal incorrectly encodes it as an uncompressed
// point, which SetBytes would correctly reject. See Issue 37294.
if x.Sign() == 0 && y.Sign() == 0 {
return nistec.NewP384Point(), true
}
if x.Sign() < 0 || y.Sign() < 0 {
return nil, false
}
if x.BitLen() > 384 || y.BitLen() > 384 {
return nil, false
}
p, err := nistec.NewP384Point().SetBytes(Marshal(P384(), x, y))
if err != nil {
return nil, false
}
return p, true
}
func p384PointToAffine(p *nistec.P384Point) (x, y *big.Int) {
out := p.Bytes()
if len(out) == 1 && out[0] == 0 {
// This is the correct encoding of the point at infinity, which
// Unmarshal does not support. See Issue 37294.
return new(big.Int), new(big.Int)
}
x, y = Unmarshal(P384(), out)
if x == nil {
panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
}
return x, y
}
// p384RandomPoint returns a random point on the curve. It's used when Add,
// Double, or ScalarMult are fed a point not on the curve, which is undefined
// behavior. Originally, we used to do the math on it anyway (which allows
// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
// happening in the first place. Now, we just can't construct a nistec.P384Point
// for an invalid pair of coordinates, because that API is safer. If we panic,
// we risk introducing a DoS. If we return nil, we risk a panic. If we return
// the input, ecdsa.Verify might fail open. The safest course seems to be to
// return a valid, random point, which hopefully won't help the attacker.
func p384RandomPoint() (x, y *big.Int) {
_, x, y, err := GenerateKey(P384(), rand.Reader)
if err != nil {
panic("crypto/elliptic: failed to generate random point")
}
return x, y
}
func (p384Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
p1, ok := p384PointFromAffine(x1, y1)
if !ok {
return p384RandomPoint()
}
p2, ok := p384PointFromAffine(x2, y2)
if !ok {
return p384RandomPoint()
}
return p384PointToAffine(p1.Add(p1, p2))
}
func (p384Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
p, ok := p384PointFromAffine(x1, y1)
if !ok {
return p384RandomPoint()
}
return p384PointToAffine(p.Double(p))
}
func (p384Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
p, ok := p384PointFromAffine(Bx, By)
if !ok {
return p384RandomPoint()
}
return p384PointToAffine(p.ScalarMult(p, scalar))
}
func (p384Curve) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
p := nistec.NewP384Generator()
return p384PointToAffine(p.ScalarMult(p, scalar))
}

View file

@ -1,165 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elliptic
import (
"crypto/elliptic/internal/nistec"
"crypto/rand"
"math/big"
)
// p521Curve is a Curve implementation based on nistec.P521Point.
//
// It's a wrapper that exposes the big.Int-based Curve interface and encodes the
// legacy idiosyncrasies it requires, such as invalid and infinity point
// handling.
//
// To interact with the nistec package, points are encoded into and decoded from
// properly formatted byte slices. All big.Int use is limited to this package.
// Encoding and decoding is 1/1000th of the runtime of a scalar multiplication,
// so the overhead is acceptable.
type p521Curve struct {
params *CurveParams
}
var p521 p521Curve
var _ Curve = p521
func initP521() {
p521.params = &CurveParams{
Name: "P-521",
BitSize: 521,
// FIPS 186-4, section D.1.2.5
P: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
"0540939446345918554318339765605212255964066145455497729631139148" +
"0858037121987999716643812574028291115057151"),
N: bigFromDecimal("68647976601306097149819007990813932172694353001433" +
"0540939446345918554318339765539424505774633321719753296399637136" +
"3321113864768612440380340372808892707005449"),
B: bigFromHex("0051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8" +
"b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef" +
"451fd46b503f00"),
Gx: bigFromHex("00c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f8" +
"28af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf9" +
"7e7e31c2e5bd66"),
Gy: bigFromHex("011839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817" +
"afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088" +
"be94769fd16650"),
}
}
func (curve p521Curve) Params() *CurveParams {
return curve.params
}
func (curve p521Curve) IsOnCurve(x, y *big.Int) bool {
// IsOnCurve is documented to reject (0, 0), the conventional point at
// infinity, which however is accepted by p521PointFromAffine.
if x.Sign() == 0 && y.Sign() == 0 {
return false
}
_, ok := p521PointFromAffine(x, y)
return ok
}
func p521PointFromAffine(x, y *big.Int) (p *nistec.P521Point, ok bool) {
// (0, 0) is by convention the point at infinity, which can't be represented
// in affine coordinates. Marshal incorrectly encodes it as an uncompressed
// point, which SetBytes would correctly reject. See Issue 37294.
if x.Sign() == 0 && y.Sign() == 0 {
return nistec.NewP521Point(), true
}
if x.Sign() < 0 || y.Sign() < 0 {
return nil, false
}
if x.BitLen() > 521 || y.BitLen() > 521 {
return nil, false
}
p, err := nistec.NewP521Point().SetBytes(Marshal(P521(), x, y))
if err != nil {
return nil, false
}
return p, true
}
func p521PointToAffine(p *nistec.P521Point) (x, y *big.Int) {
out := p.Bytes()
if len(out) == 1 && out[0] == 0 {
// This is the correct encoding of the point at infinity, which
// Unmarshal does not support. See Issue 37294.
return new(big.Int), new(big.Int)
}
x, y = Unmarshal(P521(), out)
if x == nil {
panic("crypto/elliptic: internal error: Unmarshal rejected a valid point encoding")
}
return x, y
}
// p521RandomPoint returns a random point on the curve. It's used when Add,
// Double, or ScalarMult are fed a point not on the curve, which is undefined
// behavior. Originally, we used to do the math on it anyway (which allows
// invalid curve attacks) and relied on the caller and Unmarshal to avoid this
// happening in the first place. Now, we just can't construct a nistec.P521Point
// for an invalid pair of coordinates, because that API is safer. If we panic,
// we risk introducing a DoS. If we return nil, we risk a panic. If we return
// the input, ecdsa.Verify might fail open. The safest course seems to be to
// return a valid, random point, which hopefully won't help the attacker.
func p521RandomPoint() (x, y *big.Int) {
_, x, y, err := GenerateKey(P521(), rand.Reader)
if err != nil {
panic("crypto/elliptic: failed to generate random point")
}
return x, y
}
func (p521Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
p1, ok := p521PointFromAffine(x1, y1)
if !ok {
return p521RandomPoint()
}
p2, ok := p521PointFromAffine(x2, y2)
if !ok {
return p521RandomPoint()
}
return p521PointToAffine(p1.Add(p1, p2))
}
func (p521Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
p, ok := p521PointFromAffine(x1, y1)
if !ok {
return p521RandomPoint()
}
return p521PointToAffine(p.Double(p))
}
func (p521Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {
p, ok := p521PointFromAffine(Bx, By)
if !ok {
return p521RandomPoint()
}
return p521PointToAffine(p.ScalarMult(p, scalar))
}
func (p521Curve) ScalarBaseMult(scalar []byte) (*big.Int, *big.Int) {
p := nistec.NewP521Generator()
return p521PointToAffine(p.ScalarMult(p, scalar))
}
func bigFromDecimal(s string) *big.Int {
b, ok := new(big.Int).SetString(s, 10)
if !ok {
panic("invalid encoding")
}
return b
}
func bigFromHex(s string) *big.Int {
b, ok := new(big.Int).SetString(s, 16)
if !ok {
panic("invalid encoding")
}
return b
}

View file

@ -0,0 +1,296 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package elliptic
import "math/big"
// CurveParams contains the parameters of an elliptic curve and also provides
// a generic, non-constant time implementation of Curve.
type CurveParams struct {
P *big.Int // the order of the underlying field
N *big.Int // the order of the base point
B *big.Int // the constant of the curve equation
Gx, Gy *big.Int // (x,y) of the base point
BitSize int // the size of the underlying field
Name string // the canonical name of the curve
}
func (curve *CurveParams) Params() *CurveParams {
return curve
}
// CurveParams operates, internally, on Jacobian coordinates. For a given
// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
// calculation can be performed within the transform (as in ScalarMult and
// ScalarBaseMult). But even for Add and Double, it's faster to apply and
// reverse the transform than to operate in affine coordinates.
// polynomial returns x³ - 3x + b.
func (curve *CurveParams) polynomial(x *big.Int) *big.Int {
x3 := new(big.Int).Mul(x, x)
x3.Mul(x3, x)
threeX := new(big.Int).Lsh(x, 1)
threeX.Add(threeX, x)
x3.Sub(x3, threeX)
x3.Add(x3, curve.B)
x3.Mod(x3, curve.P)
return x3
}
func (curve *CurveParams) IsOnCurve(x, y *big.Int) bool {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
return specific.IsOnCurve(x, y)
}
if x.Sign() < 0 || x.Cmp(curve.P) >= 0 ||
y.Sign() < 0 || y.Cmp(curve.P) >= 0 {
return false
}
// y² = x³ - 3x + b
y2 := new(big.Int).Mul(y, y)
y2.Mod(y2, curve.P)
return curve.polynomial(x).Cmp(y2) == 0
}
// zForAffine returns a Jacobian Z value for the affine point (x, y). If x and
// y are zero, it assumes that they represent the point at infinity because (0,
// 0) is not on any of the curves handled here.
func zForAffine(x, y *big.Int) *big.Int {
z := new(big.Int)
if x.Sign() != 0 || y.Sign() != 0 {
z.SetInt64(1)
}
return z
}
// affineFromJacobian reverses the Jacobian transform. See the comment at the
// top of the file. If the point is ∞ it returns 0, 0.
func (curve *CurveParams) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
if z.Sign() == 0 {
return new(big.Int), new(big.Int)
}
zinv := new(big.Int).ModInverse(z, curve.P)
zinvsq := new(big.Int).Mul(zinv, zinv)
xOut = new(big.Int).Mul(x, zinvsq)
xOut.Mod(xOut, curve.P)
zinvsq.Mul(zinvsq, zinv)
yOut = new(big.Int).Mul(y, zinvsq)
yOut.Mod(yOut, curve.P)
return
}
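// As an illustrative sketch of how these two helpers fit together
// (hypothetical, not part of the original file): lifting an affine point with
// zForAffine and projecting it back with affineFromJacobian is the identity
// for points whose coordinates are already reduced mod P, since z is set to 1.
func exampleJacobianRoundTrip(curve *CurveParams, x, y *big.Int) (*big.Int, *big.Int) {
	z := zForAffine(x, y) // 1 for ordinary points, 0 for (0, 0)
	return curve.affineFromJacobian(x, y, z)
}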
func (curve *CurveParams) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
return specific.Add(x1, y1, x2, y2)
}
z1 := zForAffine(x1, y1)
z2 := zForAffine(x2, y2)
return curve.affineFromJacobian(curve.addJacobian(x1, y1, z1, x2, y2, z2))
}
// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
// (x2, y2, z2) and returns their sum, also in Jacobian form.
func (curve *CurveParams) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
// See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
x3, y3, z3 := new(big.Int), new(big.Int), new(big.Int)
if z1.Sign() == 0 {
x3.Set(x2)
y3.Set(y2)
z3.Set(z2)
return x3, y3, z3
}
if z2.Sign() == 0 {
x3.Set(x1)
y3.Set(y1)
z3.Set(z1)
return x3, y3, z3
}
z1z1 := new(big.Int).Mul(z1, z1)
z1z1.Mod(z1z1, curve.P)
z2z2 := new(big.Int).Mul(z2, z2)
z2z2.Mod(z2z2, curve.P)
u1 := new(big.Int).Mul(x1, z2z2)
u1.Mod(u1, curve.P)
u2 := new(big.Int).Mul(x2, z1z1)
u2.Mod(u2, curve.P)
h := new(big.Int).Sub(u2, u1)
xEqual := h.Sign() == 0
if h.Sign() == -1 {
h.Add(h, curve.P)
}
i := new(big.Int).Lsh(h, 1)
i.Mul(i, i)
j := new(big.Int).Mul(h, i)
s1 := new(big.Int).Mul(y1, z2)
s1.Mul(s1, z2z2)
s1.Mod(s1, curve.P)
s2 := new(big.Int).Mul(y2, z1)
s2.Mul(s2, z1z1)
s2.Mod(s2, curve.P)
r := new(big.Int).Sub(s2, s1)
if r.Sign() == -1 {
r.Add(r, curve.P)
}
yEqual := r.Sign() == 0
if xEqual && yEqual {
return curve.doubleJacobian(x1, y1, z1)
}
r.Lsh(r, 1)
v := new(big.Int).Mul(u1, i)
x3.Set(r)
x3.Mul(x3, x3)
x3.Sub(x3, j)
x3.Sub(x3, v)
x3.Sub(x3, v)
x3.Mod(x3, curve.P)
y3.Set(r)
v.Sub(v, x3)
y3.Mul(y3, v)
s1.Mul(s1, j)
s1.Lsh(s1, 1)
y3.Sub(y3, s1)
y3.Mod(y3, curve.P)
z3.Add(z1, z2)
z3.Mul(z3, z3)
z3.Sub(z3, z1z1)
z3.Sub(z3, z2z2)
z3.Mul(z3, h)
z3.Mod(z3, curve.P)
return x3, y3, z3
}
func (curve *CurveParams) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p384, p521); ok {
return specific.Double(x1, y1)
}
z1 := zForAffine(x1, y1)
return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
}
// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
// returns its double, also in Jacobian form.
func (curve *CurveParams) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
// See https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
delta := new(big.Int).Mul(z, z)
delta.Mod(delta, curve.P)
gamma := new(big.Int).Mul(y, y)
gamma.Mod(gamma, curve.P)
alpha := new(big.Int).Sub(x, delta)
if alpha.Sign() == -1 {
alpha.Add(alpha, curve.P)
}
alpha2 := new(big.Int).Add(x, delta)
alpha.Mul(alpha, alpha2)
alpha2.Set(alpha)
alpha.Lsh(alpha, 1)
alpha.Add(alpha, alpha2)
beta := alpha2.Mul(x, gamma)
x3 := new(big.Int).Mul(alpha, alpha)
beta8 := new(big.Int).Lsh(beta, 3)
beta8.Mod(beta8, curve.P)
x3.Sub(x3, beta8)
if x3.Sign() == -1 {
x3.Add(x3, curve.P)
}
x3.Mod(x3, curve.P)
z3 := new(big.Int).Add(y, z)
z3.Mul(z3, z3)
z3.Sub(z3, gamma)
if z3.Sign() == -1 {
z3.Add(z3, curve.P)
}
z3.Sub(z3, delta)
if z3.Sign() == -1 {
z3.Add(z3, curve.P)
}
z3.Mod(z3, curve.P)
beta.Lsh(beta, 2)
beta.Sub(beta, x3)
if beta.Sign() == -1 {
beta.Add(beta, curve.P)
}
y3 := alpha.Mul(alpha, beta)
gamma.Mul(gamma, gamma)
gamma.Lsh(gamma, 3)
gamma.Mod(gamma, curve.P)
y3.Sub(y3, gamma)
if y3.Sign() == -1 {
y3.Add(y3, curve.P)
}
y3.Mod(y3, curve.P)
return x3, y3, z3
}
func (curve *CurveParams) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p256, p384, p521); ok {
return specific.ScalarMult(Bx, By, k)
}
Bz := new(big.Int).SetInt64(1)
x, y, z := new(big.Int), new(big.Int), new(big.Int)
for _, byte := range k {
for bitNum := 0; bitNum < 8; bitNum++ {
x, y, z = curve.doubleJacobian(x, y, z)
if byte&0x80 == 0x80 {
x, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)
}
byte <<= 1
}
}
return curve.affineFromJacobian(x, y, z)
}
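// A minimal illustration of the double-and-add loop above (hypothetical, not
// part of the original file): multiplying by the one-byte scalar {1} repeatedly
// doubles the point at infinity and then adds the input point once on the final
// bit, so for a point on the curve it returns the same coordinates reduced mod P.
func exampleScalarMultOne(curve *CurveParams, x, y *big.Int) (*big.Int, *big.Int) {
	return curve.ScalarMult(x, y, []byte{1})
}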
func (curve *CurveParams) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
// If there is a dedicated constant-time implementation for this curve operation,
// use that instead of the generic one.
if specific, ok := matchesSpecificCurve(curve, p224, p256, p384, p521); ok {
return specific.ScalarBaseMult(k)
}
return curve.ScalarMult(curve.Gx, curve.Gy, k)
}
func matchesSpecificCurve(params *CurveParams, available ...Curve) (Curve, bool) {
for _, c := range available {
if params == c.Params() {
return c, true
}
}
return nil, false
}

View file

@ -21,7 +21,7 @@ func init() {
// is returned by a single call to getrandom() on systems where int
// has a size of 32 bits.
maxGetRandomRead = (1 << 25) - 1
case "freebsd", "dragonfly", "solaris":
case "freebsd", "dragonfly", "solaris", "illumos":
maxGetRandomRead = 1 << 8
default:
panic("no maximum specified for GetRandom")

View file

@ -18,7 +18,6 @@ import (
"crypto/x509" "crypto/x509"
"errors" "errors"
"fmt" "fmt"
"internal/godebug"
"io" "io"
"net" "net"
"strings" "strings"
@ -977,9 +976,6 @@ var supportedVersions = []uint16{
VersionTLS10, VersionTLS10,
} }
// debugEnableTLS10 enables TLS 1.0. See issue 45428.
var debugEnableTLS10 = godebug.Get("tls10default") == "1"
// roleClient and roleServer are meant to call supportedVersions and parents // roleClient and roleServer are meant to call supportedVersions and parents
// with more readability at the callsite. // with more readability at the callsite.
const roleClient = true const roleClient = true
@ -991,7 +987,7 @@ func (c *Config) supportedVersions(isClient bool) []uint16 {
if needFIPS() && (v < fipsMinVersion(c) || v > fipsMaxVersion(c)) { if needFIPS() && (v < fipsMinVersion(c) || v > fipsMaxVersion(c)) {
continue continue
} }
if (c == nil || c.MinVersion == 0) && !debugEnableTLS10 && if (c == nil || c.MinVersion == 0) &&
isClient && v < VersionTLS12 { isClient && v < VersionTLS12 {
continue continue
} }

View file

@ -384,6 +384,7 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
return false
}
seenExts := make(map[uint16]bool)
for !extensions.Empty() {
var extension uint16
var extData cryptobyte.String
@ -392,6 +393,11 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool {
return false
}
if seenExts[extension] {
return false
}
seenExts[extension] = true
switch extension {
case extensionServerName:
// RFC 6066, Section 3
@ -750,6 +756,7 @@ func (m *serverHelloMsg) unmarshal(data []byte) bool {
return false
}
seenExts := make(map[uint16]bool)
for !extensions.Empty() {
var extension uint16
var extData cryptobyte.String
@ -758,6 +765,11 @@ func (m *serverHelloMsg) unmarshal(data []byte) bool {
return false
}
if seenExts[extension] {
return false
}
seenExts[extension] = true
switch extension {
case extensionStatusRequest:
m.ocspStapling = true

View file

@ -6,6 +6,7 @@ package tls
import (
"bytes"
"encoding/hex"
"math/rand"
"reflect"
"strings"
@ -463,3 +464,23 @@ func TestRejectEmptySCT(t *testing.T) {
t.Fatal("Unmarshaled ServerHello with zero-length SCT")
}
}
func TestRejectDuplicateExtensions(t *testing.T) {
clientHelloBytes, err := hex.DecodeString("010000440303000000000000000000000000000000000000000000000000000000000000000000000000001c0000000a000800000568656c6c6f0000000a000800000568656c6c6f")
if err != nil {
t.Fatalf("failed to decode test ClientHello: %s", err)
}
var clientHelloCopy clientHelloMsg
if clientHelloCopy.unmarshal(clientHelloBytes) {
t.Error("Unmarshaled ClientHello with duplicate extensions")
}
serverHelloBytes, err := hex.DecodeString("02000030030300000000000000000000000000000000000000000000000000000000000000000000000000080005000000050000")
if err != nil {
t.Fatalf("failed to decode test ServerHello: %s", err)
}
var serverHelloCopy serverHelloMsg
if serverHelloCopy.unmarshal(serverHelloBytes) {
t.Fatal("Unmarshaled ServerHello with duplicate extensions")
}
}

View file

@ -400,16 +400,6 @@ func TestVersion(t *testing.T) {
if err == nil {
t.Fatalf("expected failure to connect with TLS 1.0/1.1")
}
defer func(old bool) { debugEnableTLS10 = old }(debugEnableTLS10)
debugEnableTLS10 = true
_, _, err = testHandshake(t, clientConfig, serverConfig)
if err != nil {
t.Fatalf("handshake failed: %s", err)
}
if state.Version != VersionTLS11 {
t.Fatalf("incorrect version %x, should be %x", state.Version, VersionTLS11)
}
}
func TestCipherSuitePreference(t *testing.T) {

View file

@ -7,6 +7,7 @@ package x509
import (
"bytes"
"crypto"
"crypto/x509/pkix"
"errors"
"fmt"
"net"
@ -837,6 +838,50 @@ func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate
return n
}
// alreadyInChain checks whether a candidate certificate is present in a chain.
// Rather than doing a direct byte for byte equivalency check, we check if the
// subject, public key, and SAN, if present, are equal. This prevents loops that
// are created by mutual cross-signatures, or other cross-signature bridge
// oddities.
func alreadyInChain(candidate *Certificate, chain []*Certificate) bool {
type pubKeyEqual interface {
Equal(crypto.PublicKey) bool
}
var candidateSAN *pkix.Extension
for _, ext := range candidate.Extensions {
if ext.Id.Equal(oidExtensionSubjectAltName) {
candidateSAN = &ext
break
}
}
for _, cert := range chain {
if !bytes.Equal(candidate.RawSubject, cert.RawSubject) {
continue
}
if !candidate.PublicKey.(pubKeyEqual).Equal(cert.PublicKey) {
continue
}
var certSAN *pkix.Extension
for _, ext := range cert.Extensions {
if ext.Id.Equal(oidExtensionSubjectAltName) {
certSAN = &ext
break
}
}
if candidateSAN == nil && certSAN == nil {
return true
} else if candidateSAN == nil || certSAN == nil {
return false
}
if bytes.Equal(candidateSAN.Value, certSAN.Value) {
return true
}
}
return false
}
// maxChainSignatureChecks is the maximum number of CheckSignatureFrom calls
// that an invocation of buildChains will (transitively) make. Most chains are
// less than 15 certificates long, so this leaves space for multiple chains and
// less than 15 certificates long, so this leaves space for multiple chains and // less than 15 certificates long, so this leaves space for multiple chains and
@ -849,19 +894,10 @@ func (c *Certificate) buildChains(currentChain []*Certificate, sigChecks *int, o
hintCert *Certificate hintCert *Certificate
) )
type pubKeyEqual interface {
Equal(crypto.PublicKey) bool
}
considerCandidate := func(certType int, candidate *Certificate) { considerCandidate := func(certType int, candidate *Certificate) {
for _, cert := range currentChain { if alreadyInChain(candidate, currentChain) {
// If a certificate already appeared in the chain we've built, don't
// reconsider it. This prevents loops, for isntance those created by
// mutual cross-signatures, or other cross-signature bridges oddities.
if bytes.Equal(cert.RawSubject, candidate.RawSubject) && cert.PublicKey.(pubKeyEqual).Equal(candidate.PublicKey) {
return return
} }
}
if sigChecks == nil { if sigChecks == nil {
sigChecks = new(int) sigChecks = new(int)
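alreadyInChain compares public keys through the Equal method that the standard key types provide, reached via an interface assertion. A standalone sketch of that comparison pattern, using freshly generated throwaway keys purely for illustration:

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

// pubKeyEqual matches the method shared by *rsa.PublicKey,
// *ecdsa.PublicKey and ed25519.PublicKey.
type pubKeyEqual interface {
	Equal(crypto.PublicKey) bool
}

func samePublicKey(a, b crypto.PublicKey) bool {
	return a.(pubKeyEqual).Equal(b)
}

func main() {
	k1, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	k2, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	fmt.Println(samePublicKey(k1.Public(), k1.Public())) // true
	fmt.Println(samePublicKey(k1.Public(), k2.Public())) // false
}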

View file

@ -2340,6 +2340,29 @@ func TestPathBuilding(t *testing.T) {
"CN=leaf -> CN=inter b -> CN=inter c -> CN=root", "CN=leaf -> CN=inter b -> CN=inter c -> CN=root",
}, },
}, },
{
// Build a simple two node graph, where the leaf is directly issued from
// the root and both certificates have matching subject and public key, but
// the leaf has SANs.
name: "leaf with same subject, key, as parent but with SAN",
graph: trustGraphDescription{
Roots: []string{"root"},
Leaf: "root",
Graph: []trustGraphEdge{
{
Issuer: "root",
Subject: "root",
Type: leafCertificate,
MutateTemplate: func(c *Certificate) {
c.DNSNames = []string{"localhost"}
},
},
},
},
expectedChains: []string{
"CN=root -> CN=root",
},
},
} }
for _, tc := range tests { for _, tc := range tests {

View file

@ -1478,21 +1478,14 @@ func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv
return nil, errors.New("x509: no SerialNumber given") return nil, errors.New("x509: no SerialNumber given")
} }
// RFC 5280 Section 4.1.2.2: serial number must positive and should not be longer // RFC 5280 Section 4.1.2.2: serial number must positive
// than 20 octets.
// //
// We cannot simply check for len(serialBytes) > 20, because encoding/asn1 may // We _should_ also restrict serials to <= 20 octets, but it turns out a lot of people
// pad the slice in order to prevent the integer being mistaken for a negative // get this wrong, in part because the encoding can itself alter the length of the
// number (DER uses the high bit of the left-most byte to indicate the sign.), // serial. For now we accept these non-conformant serials.
// so we need to double check the composition of the serial if it is exactly
// 20 bytes.
if template.SerialNumber.Sign() == -1 { if template.SerialNumber.Sign() == -1 {
return nil, errors.New("x509: serial number must be positive") return nil, errors.New("x509: serial number must be positive")
} }
serialBytes := template.SerialNumber.Bytes()
if len(serialBytes) > 20 || (len(serialBytes) == 20 && serialBytes[0]&0x80 != 0) {
return nil, errors.New("x509: serial number exceeds 20 octets")
}
if template.BasicConstraintsValid && !template.IsCA && template.MaxPathLen != -1 && (template.MaxPathLen != 0 || template.MaxPathLenZero) { if template.BasicConstraintsValid && !template.IsCA && template.MaxPathLen != -1 && (template.MaxPathLen != 0 || template.MaxPathLenZero) {
return nil, errors.New("x509: only CAs are allowed to specify MaxPathLen") return nil, errors.New("x509: only CAs are allowed to specify MaxPathLen")

View file

@ -3589,42 +3589,6 @@ func TestOmitEmptyExtensions(t *testing.T) {
} }
} }
func TestCreateCertificateLongSerial(t *testing.T) {
k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
t.Fatal(err)
}
serialBytes := make([]byte, 21)
serialBytes[0] = 0x80
serialBytes[20] = 1
tooLong := big.NewInt(0).SetBytes(serialBytes)
tmpl := &Certificate{
SerialNumber: tooLong,
Subject: pkix.Name{
CommonName: ":)",
},
NotAfter: time.Now().Add(time.Hour),
NotBefore: time.Now().Add(-time.Hour),
}
expectedErr := "x509: serial number exceeds 20 octets"
_, err = CreateCertificate(rand.Reader, tmpl, tmpl, k.Public(), k)
if err == nil || err.Error() != expectedErr {
t.Errorf("CreateCertificate returned unexpected error: want %q, got %q", expectedErr, err)
}
serialBytes = serialBytes[:20]
tmpl.SerialNumber = big.NewInt(0).SetBytes(serialBytes)
_, err = CreateCertificate(rand.Reader, tmpl, tmpl, k.Public(), k)
if err == nil || err.Error() != expectedErr {
t.Errorf("CreateCertificate returned unexpected error: want %q, got %q", expectedErr, err)
}
}
var negativeSerialCert = `-----BEGIN CERTIFICATE----- var negativeSerialCert = `-----BEGIN CERTIFICATE-----
MIIBBTCBraADAgECAgH/MAoGCCqGSM49BAMCMA0xCzAJBgNVBAMTAjopMB4XDTIy MIIBBTCBraADAgECAgH/MAoGCCqGSM49BAMCMA0xCzAJBgNVBAMTAjopMB4XDTIy
MDQxNDIzNTYwNFoXDTIyMDQxNTAxNTYwNFowDTELMAkGA1UEAxMCOikwWTATBgcq MDQxNDIzNTYwNFoXDTIyMDQxNTAxNTYwNFowDTELMAkGA1UEAxMCOikwWTATBgcq

View file

@ -136,10 +136,9 @@ const (
// auxiliary symbols: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#auxiliary-symbol-records // auxiliary symbols: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#auxiliary-symbol-records
// COMDAT sections: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#comdat-sections-object-only // COMDAT sections: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#comdat-sections-object-only
// auxiliary info for section definitions: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#auxiliary-format-5-section-definitions // auxiliary info for section definitions: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#auxiliary-format-5-section-definitions
//
func (f *File) COFFSymbolReadSectionDefAux(idx int) (*COFFSymbolAuxFormat5, error) { func (f *File) COFFSymbolReadSectionDefAux(idx int) (*COFFSymbolAuxFormat5, error) {
var rv *COFFSymbolAuxFormat5 var rv *COFFSymbolAuxFormat5
if idx < 0 || idx > len(f.COFFSymbols) { if idx < 0 || idx >= len(f.COFFSymbols) {
return rv, fmt.Errorf("invalid symbol index") return rv, fmt.Errorf("invalid symbol index")
} }
pesym := &f.COFFSymbols[idx] pesym := &f.COFFSymbols[idx]
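The fix tightens the guard from idx > len(f.COFFSymbols) to idx >= len(f.COFFSymbols): an index equal to the length is already out of range, so the old check allowed a panic one past the end. The same idiom in isolation, with a hypothetical lookup helper:

package main

import "fmt"

// at guards the upper bound with >=, not >, so idx == len(xs)
// is reported as an error instead of panicking on xs[idx].
func at(xs []string, idx int) (string, error) {
	if idx < 0 || idx >= len(xs) {
		return "", fmt.Errorf("invalid index %d for slice of length %d", idx, len(xs))
	}
	return xs[idx], nil
}

func main() {
	syms := []string{".text", ".data"}
	fmt.Println(at(syms, 2)) // invalid index 2 for slice of length 2
}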

View file

@ -1185,20 +1185,13 @@ func (ctxt *Context) importGo(p *Package, path, srcDir string, mode ImportMode)
if ctxt.CgoEnabled { if ctxt.CgoEnabled {
cgo = "1" cgo = "1"
} }
cmd.Env = append(os.Environ(), cmd.Env = append(cmd.Environ(),
"GOOS="+ctxt.GOOS, "GOOS="+ctxt.GOOS,
"GOARCH="+ctxt.GOARCH, "GOARCH="+ctxt.GOARCH,
"GOROOT="+ctxt.GOROOT, "GOROOT="+ctxt.GOROOT,
"GOPATH="+ctxt.GOPATH, "GOPATH="+ctxt.GOPATH,
"CGO_ENABLED="+cgo, "CGO_ENABLED="+cgo,
) )
if cmd.Dir != "" {
// If possible, set PWD: if an error occurs and PWD includes a symlink, we
// want the error to refer to Dir, not some other name for it.
if abs, err := filepath.Abs(cmd.Dir); err == nil {
cmd.Env = append(cmd.Env, "PWD="+abs)
}
}
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
return fmt.Errorf("go/build: go list %s: %v\n%s\n", path, err, stderr.String()) return fmt.Errorf("go/build: go list %s: %v\n%s\n", path, err, stderr.String())

View file

@ -38,7 +38,7 @@ const (
var config = printer.Config{Mode: printerMode, Tabwidth: tabWidth} var config = printer.Config{Mode: printerMode, Tabwidth: tabWidth}
const parserMode = parser.ParseComments const parserMode = parser.ParseComments | parser.SkipObjectResolution
// Node formats node in canonical gofmt style and writes the result to dst. // Node formats node in canonical gofmt style and writes the result to dst.
// //
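SkipObjectResolution tells go/parser not to build the legacy ast.Object resolution data, which gofmt never consults, so parsing gets cheaper with identical output. A standalone use of the same flag combination, as a minimal sketch:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

const src = `package p

func f() {}
`

func main() {
	fset := token.NewFileSet()
	// Same mode gofmt now uses: keep comments, skip legacy object resolution.
	mode := parser.ParseComments | parser.SkipObjectResolution
	f, err := parser.ParseFile(fset, "p.go", src, mode)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Name.Name) // "p"
}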

View file

@ -136,7 +136,7 @@ func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*type
setUsesCgo(&conf) setUsesCgo(&conf)
file, err := p.cgo(bp) file, err := p.cgo(bp)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("error processing cgo for package %q: %w", bp.ImportPath, err)
} }
files = append(files, file) files = append(files, file)
} }
@ -223,9 +223,9 @@ func (p *Importer) cgo(bp *build.Package) (*ast.File, error) {
args = append(args, bp.CgoCPPFLAGS...) args = append(args, bp.CgoCPPFLAGS...)
if len(bp.CgoPkgConfig) > 0 { if len(bp.CgoPkgConfig) > 0 {
cmd := exec.Command("pkg-config", append([]string{"--cflags"}, bp.CgoPkgConfig...)...) cmd := exec.Command("pkg-config", append([]string{"--cflags"}, bp.CgoPkgConfig...)...)
out, err := cmd.CombinedOutput() out, err := cmd.Output()
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("pkg-config --cflags: %w", err)
} }
args = append(args, strings.Fields(string(out))...) args = append(args, strings.Fields(string(out))...)
} }
@ -237,7 +237,7 @@ func (p *Importer) cgo(bp *build.Package) (*ast.File, error) {
cmd := exec.Command(args[0], args[1:]...) cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = bp.Dir cmd.Dir = bp.Dir
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
return nil, err return nil, fmt.Errorf("go tool cgo: %w", err)
} }
return parser.ParseFile(p.fset, filepath.Join(tmpdir, "_cgo_gotypes.go"), nil, 0) return parser.ParseFile(p.fset, filepath.Join(tmpdir, "_cgo_gotypes.go"), nil, 0)
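The importer now wraps subprocess failures with %w and switches pkg-config to cmd.Output, so the underlying error (typically an *exec.ExitError carrying stderr) remains reachable with errors.As. A sketch of inspecting such a wrapped error, using a deliberately failing query (the package name is made up):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func cflags(pkgs ...string) ([]byte, error) {
	cmd := exec.Command("pkg-config", append([]string{"--cflags"}, pkgs...)...)
	out, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("pkg-config --cflags: %w", err)
	}
	return out, nil
}

func main() {
	_, err := cflags("definitely-not-a-real-package")
	var exitErr *exec.ExitError
	if errors.As(err, &exitErr) {
		// Stderr is populated because Output (unlike CombinedOutput)
		// captures it on the ExitError.
		fmt.Printf("pkg-config failed: %s\n", exitErr.Stderr)
	} else if err != nil {
		fmt.Println(err) // e.g. pkg-config not installed
	}
}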

View file

@ -9,6 +9,7 @@ package types
import ( import (
"fmt" "fmt"
"go/ast" "go/ast"
"go/token"
"strings" "strings"
) )
@ -339,11 +340,10 @@ func (check *Checker) initVars(lhs []*Var, origRHS []ast.Expr, returnStmt ast.St
} else if len(rhs) > 0 { } else if len(rhs) > 0 {
at = rhs[len(rhs)-1].expr // report at last value at = rhs[len(rhs)-1].expr // report at last value
} }
check.errorf(at, _WrongResultCount, "%s return values\n\thave %s\n\twant %s", err := newErrorf(at, _WrongResultCount, "%s return values", qualifier)
qualifier, err.errorf(token.NoPos, "have %s", check.typesSummary(operandTypes(rhs), false))
check.typesSummary(operandTypes(rhs), false), err.errorf(token.NoPos, "want %s", check.typesSummary(varTypes(lhs), false))
check.typesSummary(varTypes(lhs), false), check.report(err)
)
return return
} }
if compilerErrorMessages { if compilerErrorMessages {

View file

@ -368,11 +368,10 @@ func (check *Checker) arguments(call *ast.CallExpr, sig *Signature, targs []Type
if sig.params != nil { if sig.params != nil {
params = sig.params.vars params = sig.params.vars
} }
check.errorf(at, _WrongArgCount, "%s arguments in call to %s\n\thave %s\n\twant %s", err := newErrorf(at, _WrongArgCount, "%s arguments in call to %s", qualifier, call.Fun)
qualifier, call.Fun, err.errorf(token.NoPos, "have %s", check.typesSummary(operandTypes(args), false))
check.typesSummary(operandTypes(args), false), err.errorf(token.NoPos, "want %s", check.typesSummary(varTypes(params), sig.variadic))
check.typesSummary(varTypes(params), sig.variadic), check.report(err)
)
return return
} }

View file

@ -8,6 +8,7 @@ package types
import ( import (
"go/constant" "go/constant"
"go/token"
"unicode" "unicode"
) )
@ -74,7 +75,9 @@ func (check *Checker) conversion(x *operand, T Type) {
if compilerErrorMessages { if compilerErrorMessages {
if cause != "" { if cause != "" {
// Add colon at end of line if we have a following cause. // Add colon at end of line if we have a following cause.
check.errorf(x, _InvalidConversion, "cannot convert %s to type %s:\n\t%s", x, T, cause) err := newErrorf(x, _InvalidConversion, "cannot convert %s to type %s:", x, T)
err.errorf(token.NoPos, cause)
check.report(err)
} else { } else {
check.errorf(x, _InvalidConversion, "cannot convert %s to type %s", x, T) check.errorf(x, _InvalidConversion, "cannot convert %s to type %s", x, T)
} }

View file

@ -8,7 +8,6 @@ package types
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"go/ast" "go/ast"
"go/token" "go/token"
@ -26,6 +25,64 @@ func unreachable() {
panic("unreachable") panic("unreachable")
} }
// An error_ represents a type-checking error.
// To report an error_, call Checker.report.
type error_ struct {
desc []errorDesc
code errorCode
soft bool // TODO(gri) eventually determine this from an error code
}
// An errorDesc describes part of a type-checking error.
type errorDesc struct {
posn positioner
format string
args []interface{}
}
func (err *error_) empty() bool {
return err.desc == nil
}
func (err *error_) pos() token.Pos {
if err.empty() {
return token.NoPos
}
return err.desc[0].posn.Pos()
}
func (err *error_) msg(fset *token.FileSet, qf Qualifier) string {
if err.empty() {
return "no error"
}
var buf bytes.Buffer
for i := range err.desc {
p := &err.desc[i]
if i > 0 {
fmt.Fprint(&buf, "\n\t")
if p.posn.Pos().IsValid() {
fmt.Fprintf(&buf, "%s: ", fset.Position(p.posn.Pos()))
}
}
buf.WriteString(sprintf(fset, qf, false, p.format, p.args...))
}
return buf.String()
}
// String is for testing.
func (err *error_) String() string {
if err.empty() {
return "no error"
}
return fmt.Sprintf("%d: %s", err.pos(), err.msg(nil, nil))
}
// errorf adds formatted error information to err.
// It may be called multiple times to provide additional information.
func (err *error_) errorf(at token.Pos, format string, args ...interface{}) {
err.desc = append(err.desc, errorDesc{atPos(at), format, args})
}
func (check *Checker) qualifier(pkg *Package) string { func (check *Checker) qualifier(pkg *Package) string {
// Qualify the package unless it's the package being type-checked. // Qualify the package unless it's the package being type-checked.
if pkg != check.pkg { if pkg != check.pkg {
@ -140,23 +197,34 @@ func (check *Checker) dump(format string, args ...any) {
fmt.Println(sprintf(check.fset, check.qualifier, true, format, args...)) fmt.Println(sprintf(check.fset, check.qualifier, true, format, args...))
} }
func (check *Checker) err(err error) { // Report records the error pointed to by errp, setting check.firstError if
if err == nil { // necessary.
return func (check *Checker) report(errp *error_) {
if errp.empty() {
panic("empty error details")
} }
var e Error
isInternal := errors.As(err, &e) span := spanOf(errp.desc[0].posn)
e := Error{
Fset: check.fset,
Pos: span.pos,
Msg: errp.msg(check.fset, check.qualifier),
Soft: errp.soft,
go116code: errp.code,
go116start: span.start,
go116end: span.end,
}
// Cheap trick: Don't report errors with messages containing // Cheap trick: Don't report errors with messages containing
// "invalid operand" or "invalid type" as those tend to be // "invalid operand" or "invalid type" as those tend to be
// follow-on errors which don't add useful information. Only // follow-on errors which don't add useful information. Only
// exclude them if these strings are not at the beginning, // exclude them if these strings are not at the beginning,
// and only if we have at least one error already reported. // and only if we have at least one error already reported.
isInvalidErr := isInternal && (strings.Index(e.Msg, "invalid operand") > 0 || strings.Index(e.Msg, "invalid type") > 0) isInvalidErr := strings.Index(e.Msg, "invalid operand") > 0 || strings.Index(e.Msg, "invalid type") > 0
if check.firstErr != nil && isInvalidErr { if check.firstErr != nil && isInvalidErr {
return return
} }
if isInternal {
e.Msg = stripAnnotations(e.Msg) e.Msg = stripAnnotations(e.Msg)
if check.errpos != nil { if check.errpos != nil {
// If we have an internal error and the errpos override is set, use it to // If we have an internal error and the errpos override is set, use it to
@ -168,8 +236,7 @@ func (check *Checker) err(err error) {
e.go116start = span.start e.go116start = span.start
e.go116end = span.end e.go116end = span.end
} }
err = e err := e
}
if check.firstErr == nil { if check.firstErr == nil {
check.firstErr = err check.firstErr = err
@ -178,10 +245,6 @@ func (check *Checker) err(err error) {
if trace { if trace {
pos := e.Pos pos := e.Pos
msg := e.Msg msg := e.Msg
if !isInternal {
msg = err.Error()
pos = token.NoPos
}
check.trace(pos, "ERROR: %s", msg) check.trace(pos, "ERROR: %s", msg)
} }
@ -192,35 +255,26 @@ func (check *Checker) err(err error) {
f(err) f(err)
} }
func (check *Checker) newError(at positioner, code errorCode, soft bool, msg string) error { // newErrorf creates a new error_ for later reporting with check.report.
span := spanOf(at) func newErrorf(at positioner, code errorCode, format string, args ...any) *error_ {
return Error{ return &error_{
Fset: check.fset, desc: []errorDesc{{at, format, args}},
Pos: span.pos, code: code,
Msg: msg,
Soft: soft,
go116code: code,
go116start: span.start,
go116end: span.end,
} }
} }
// newErrorf creates a new Error, but does not handle it.
func (check *Checker) newErrorf(at positioner, code errorCode, soft bool, format string, args ...any) error {
msg := check.sprintf(format, args...)
return check.newError(at, code, soft, msg)
}
func (check *Checker) error(at positioner, code errorCode, msg string) { func (check *Checker) error(at positioner, code errorCode, msg string) {
check.err(check.newError(at, code, false, msg)) check.report(newErrorf(at, code, msg))
} }
func (check *Checker) errorf(at positioner, code errorCode, format string, args ...any) { func (check *Checker) errorf(at positioner, code errorCode, format string, args ...any) {
check.error(at, code, check.sprintf(format, args...)) check.report(newErrorf(at, code, format, args...))
} }
func (check *Checker) softErrorf(at positioner, code errorCode, format string, args ...any) { func (check *Checker) softErrorf(at positioner, code errorCode, format string, args ...any) {
check.err(check.newErrorf(at, code, true, format, args...)) err := newErrorf(at, code, format, args...)
err.soft = true
check.report(err)
} }
func (check *Checker) invalidAST(at positioner, format string, args ...any) { func (check *Checker) invalidAST(at positioner, format string, args ...any) {

View file

@ -4,7 +4,30 @@
package types package types
import "testing" import (
"go/token"
"testing"
)
func TestError(t *testing.T) {
var err error_
want := "no error"
if got := err.String(); got != want {
t.Errorf("empty error: got %q, want %q", got, want)
}
want = "0: foo 42"
err.errorf(token.NoPos, "foo %d", 42)
if got := err.String(); got != want {
t.Errorf("simple error: got %q, want %q", got, want)
}
want = "0: foo 42\n\tbar 43"
err.errorf(token.NoPos, "bar %d", 43)
if got := err.String(); got != want {
t.Errorf("simple error: got %q, want %q", got, want)
}
}
func TestStripAnnotations(t *testing.T) { func TestStripAnnotations(t *testing.T) {
for _, test := range []struct { for _, test := range []struct {

View file

@ -87,7 +87,7 @@ func (check *Checker) op(m opPredicates, x *operand, op token.Token) bool {
// overflow checks that the constant x is representable by its type. // overflow checks that the constant x is representable by its type.
// For untyped constants, it checks that the value doesn't become // For untyped constants, it checks that the value doesn't become
// arbitrarily large. // arbitrarily large.
func (check *Checker) overflow(x *operand, op token.Token, opPos token.Pos) { func (check *Checker) overflow(x *operand, opPos token.Pos) {
assert(x.mode == constant_) assert(x.mode == constant_)
if x.val.Kind() == constant.Unknown { if x.val.Kind() == constant.Unknown {
@ -115,8 +115,8 @@ func (check *Checker) overflow(x *operand, op token.Token, opPos token.Pos) {
} }
} }
// opName returns the name of an operation, or the empty string. // opName returns the name of the operation if x is an operation
// Only operations that might overflow are handled. // that might overflow; otherwise it returns the empty string.
func opName(e ast.Expr) string { func opName(e ast.Expr) string {
switch e := e.(type) { switch e := e.(type) {
case *ast.BinaryExpr: case *ast.BinaryExpr:
@ -213,7 +213,7 @@ func (check *Checker) unary(x *operand, e *ast.UnaryExpr) {
} }
x.val = constant.UnaryOp(e.Op, x.val, prec) x.val = constant.UnaryOp(e.Op, x.val, prec)
x.expr = e x.expr = e
check.overflow(x, e.Op, x.Pos()) check.overflow(x, x.Pos())
return return
} }
@ -991,7 +991,7 @@ func (check *Checker) shift(x, y *operand, e ast.Expr, op token.Token) {
if b, _ := e.(*ast.BinaryExpr); b != nil { if b, _ := e.(*ast.BinaryExpr); b != nil {
opPos = b.OpPos opPos = b.OpPos
} }
check.overflow(x, op, opPos) check.overflow(x, opPos)
return return
} }
@ -1171,7 +1171,7 @@ func (check *Checker) binary(x *operand, e ast.Expr, lhs, rhs ast.Expr, op token
} }
x.val = constant.BinaryOp(x.val, op, y.val) x.val = constant.BinaryOp(x.val, op, y.val)
x.expr = e x.expr = e
check.overflow(x, op, opPos) check.overflow(x, opPos)
return return
} }

View file

@ -166,10 +166,11 @@ func (s *StdSizes) Sizeof(T Type) int64 {
// common architecture word sizes and alignments // common architecture word sizes and alignments
var gcArchSizes = map[string]*StdSizes{ var gcArchSizes = map[string]*StdSizes{
"386": {4, 4}, "386": {4, 4},
"arm": {4, 4},
"arm64": {8, 8},
"amd64": {8, 8}, "amd64": {8, 8},
"amd64p32": {4, 8}, "amd64p32": {4, 8},
"arm": {4, 4},
"arm64": {8, 8},
"loong64": {8, 8},
"mips": {4, 4}, "mips": {4, 4},
"mipsle": {4, 4}, "mipsle": {4, 4},
"mips64": {8, 8}, "mips64": {8, 8},
@ -188,7 +189,7 @@ var gcArchSizes = map[string]*StdSizes{
// The result is nil if a compiler/architecture pair is not known. // The result is nil if a compiler/architecture pair is not known.
// //
// Supported architectures for compiler "gc": // Supported architectures for compiler "gc":
// "386", "arm", "arm64", "amd64", "amd64p32", "mips", "mipsle", // "386", "amd64", "amd64p32", "arm", "arm64", "loong64", "mips", "mipsle",
// "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm". // "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm".
func SizesFor(compiler, arch string) Sizes { func SizesFor(compiler, arch string) Sizes {
var m map[string]*StdSizes var m map[string]*StdSizes
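With loong64 added to gcArchSizes, SizesFor("gc", "loong64") returns a usable Sizes value instead of nil. A quick check, assuming a toolchain that includes this change:

package main

import (
	"fmt"
	"go/types"
)

func main() {
	s := types.SizesFor("gc", "loong64")
	if s == nil {
		fmt.Println("unknown compiler/arch pair")
		return
	}
	// 64-bit word size and alignment, per the new table entry.
	fmt.Println(s.Sizeof(types.Typ[types.Uintptr]), s.Alignof(types.Typ[types.Int64]))
}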

View file

@ -349,6 +349,25 @@ const _ = unsafe.Sizeof(func() {
assert(iota == 0) assert(iota == 0)
}) })
// issue #52438
const i1 = iota
const i2 = iota
const i3 = iota
func _() {
assert(i1 == 0)
assert(i2 == 0)
assert(i3 == 0)
const i4 = iota
const i5 = iota
const i6 = iota
assert(i4 == 0)
assert(i5 == 0)
assert(i6 == 0)
}
// untyped constants must not get arbitrarily large // untyped constants must not get arbitrarily large
const prec = 512 // internal maximum precision for integers const prec = 512 // internal maximum precision for integers
const maxInt = (1<<(prec/2) - 1) * (1<<(prec/2) + 1) // == 1<<prec - 1 const maxInt = (1<<(prec/2) - 1) * (1<<(prec/2) + 1) // == 1<<prec - 1

View file

@ -0,0 +1,11 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package p
func _() {
const x = 0
x /* ERROR cannot assign to x */ += 1
x /* ERROR cannot assign to x */ ++
}

View file

@ -252,21 +252,20 @@ func (h *Hash) Sum64() uint64 {
// MakeSeed returns a new random seed. // MakeSeed returns a new random seed.
func MakeSeed() Seed { func MakeSeed() Seed {
var s1, s2 uint64 var s uint64
for { for {
s1 = uint64(runtime_fastrand()) s = runtime_fastrand64()
s2 = uint64(runtime_fastrand())
// We use seed 0 to indicate an uninitialized seed/hash, // We use seed 0 to indicate an uninitialized seed/hash,
// so keep trying until we get a non-zero seed. // so keep trying until we get a non-zero seed.
if s1|s2 != 0 { if s != 0 {
break break
} }
} }
return Seed{s: s1<<32 + s2} return Seed{s: s}
} }
//go:linkname runtime_fastrand runtime.fastrand //go:linkname runtime_fastrand64 runtime.fastrand64
func runtime_fastrand() uint32 func runtime_fastrand64() uint64
func rthash(ptr *byte, len int, seed uint64) uint64 { func rthash(ptr *byte, len int, seed uint64) uint64 {
if len == 0 { if len == 0 {

View file

@ -21,11 +21,12 @@ TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
CMP R5,R6,CR7 CMP R5,R6,CR7
CMP R3,R4,CR6 CMP R3,R4,CR6
BEQ CR7,equal BEQ CR7,equal
#ifdef GOARCH_ppc64le MOVBZ internalcpu·PPC64+const_offsetPPC64HasPOWER9(SB), R16
BR cmpbodyLE<>(SB) CMP R16,$1
#else BNE power8
BR cmpbodyBE<>(SB) BR cmpbodyp9<>(SB)
#endif power8:
BR cmpbody<>(SB)
equal: equal:
BEQ CR6,done BEQ CR6,done
MOVD $1, R8 MOVD $1, R8
@ -52,11 +53,12 @@ TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
CMP R5,R6,CR7 CMP R5,R6,CR7
CMP R3,R4,CR6 CMP R3,R4,CR6
BEQ CR7,equal BEQ CR7,equal
#ifdef GOARCH_ppc64le MOVBZ internalcpu·PPC64+const_offsetPPC64HasPOWER9(SB), R16
BR cmpbodyLE<>(SB) CMP R16,$1
#else BNE power8
BR cmpbodyBE<>(SB) BR cmpbodyp9<>(SB)
#endif power8:
BR cmpbody<>(SB)
equal: equal:
BEQ CR6,done BEQ CR6,done
MOVD $1, R8 MOVD $1, R8
@ -70,108 +72,225 @@ done:
MOVD $0, R3 MOVD $0, R3
RET RET
// Do an efficient memcmp for ppc64le #ifdef GOARCH_ppc64le
DATA byteswap<>+0(SB)/8, $0x0706050403020100
DATA byteswap<>+8(SB)/8, $0x0f0e0d0c0b0a0908
GLOBL byteswap<>+0(SB), RODATA, $16
#define SWAP V21
#endif
// Do an efficient memcmp for ppc64le/ppc64/POWER8
// R3 = a len // R3 = a len
// R4 = b len // R4 = b len
// R5 = a addr // R5 = a addr
// R6 = b addr // R6 = b addr
// On exit: // On exit:
// R3 = return value // R3 = return value
TEXT cmpbodyLE<>(SB),NOSPLIT|NOFRAME,$0-0 TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0-0
MOVD R3,R8 // set up length MOVD R3,R8 // set up length
CMP R3,R4,CR2 // unequal? CMP R3,R4,CR2 // unequal?
BC 12,8,setuplen // BLT CR2 BLT CR2,setuplen // BLT CR2
MOVD R4,R8 // use R4 for comparison len MOVD R4,R8 // use R4 for comparison len
setuplen: setuplen:
MOVD R8,CTR // set up loop counter
CMP R8,$8 // only optimize >=8
BLT simplecheck
DCBT (R5) // cache hint
DCBT (R6)
CMP R8,$32 // optimize >= 32 CMP R8,$32 // optimize >= 32
MOVD R8,R9 MOVD R8,R9
BLT setup8a // 8 byte moves only BLT setup8a // optimize < 32
setup32a: MOVD $16,R10 // set offsets to load into vectors
SRADCC $5,R8,R9 // number of 32 byte chunks CMP R8,$64
MOVD R9,CTR BLT cmp32 // process size 32-63
// Special processing for 32 bytes or longer. DCBT (R5) // optimize >= 64
// Loading this way is faster and correct as long as the DCBT (R6) // cache hint
// doublewords being compared are equal. Once they MOVD $32,R11 // set offsets to load into vector
// are found unequal, reload them in proper byte order MOVD $48,R12 // set offsets to load into vector
// to determine greater or less than.
loop32a: loop64a:// process size 64 and greater
MOVD 0(R5),R9 // doublewords to compare LXVD2X (R5)(R0),V3 // load bytes of A at offset 0 into vector
MOVD 0(R6),R10 // get 4 doublewords LXVD2X (R6)(R0),V4 // load bytes of B at offset 0 into vector
MOVD 8(R5),R14 VCMPEQUDCC V3,V4,V1
MOVD 8(R6),R15 BGE CR6,different // jump out if its different
CMPU R9,R10 // bytes equal?
MOVD $0,R16 // set up for cmpne LXVD2X (R5)(R10),V3 // load bytes of A at offset 16 into vector
BNE cmpne // further compare for LT or GT LXVD2X (R6)(R10),V4 // load bytes of B at offset 16 into vector
MOVD 16(R5),R9 // get next pair of doublewords
MOVD 16(R6),R10 VCMPEQUDCC V3,V4,V1
CMPU R14,R15 // bytes match? BGE CR6,different
MOVD $8,R16 // set up for cmpne
BNE cmpne // further compare for LT or GT LXVD2X (R5)(R11),V3 // load bytes of A at offset 32 into vector
MOVD 24(R5),R14 // get next pair of doublewords LXVD2X (R6)(R11),V4 // load bytes of B at offset 32 into vector
MOVD 24(R6),R15
CMPU R9,R10 // bytes match? VCMPEQUDCC V3,V4,V1
MOVD $16,R16 // set up for cmpne BGE CR6,different
BNE cmpne // further compare for LT or GT
MOVD $-8,R16 // for cmpne, R5,R6 already inc by 32 LXVD2X (R5)(R12),V3 // load bytes of A at offset 64 into vector
ADD $32,R5 // bump up to next 32 LXVD2X (R6)(R12),V4 // load bytes of B at offset 64 into vector
ADD $32,R6
CMPU R14,R15 // bytes match? VCMPEQUDCC V3,V4,V1
BC 8,2,loop32a // br ctr and cr BGE CR6,different
BNE cmpne
ADD $-64,R9,R9 // reduce remaining size by 64
ADD $64,R5,R5 // increment to next 64 bytes of A
ADD $64,R6,R6 // increment to next 64 bytes of B
CMPU R9,$64
BGE loop64a // loop back to loop64a only if there are >= 64 bytes remaining
CMPU R9,$32
BGE cmp32 // loop to cmp32 if there are 32-64 bytes remaining
CMPU R9,$0
BNE rem // loop to rem if the remainder is not 0
BEQ CR2,equal // remainder is zero, jump to equal if len(A)==len(B)
BLT CR2,less // jump to less if len(A)<len(B)
BR greater // jump to greater otherwise
cmp32:
LXVD2X (R5)(R0),V3 // load bytes of A at offset 0 into vector
LXVD2X (R6)(R0),V4 // load bytes of B at offset 0 into vector
VCMPEQUDCC V3,V4,V1
BGE CR6,different
LXVD2X (R5)(R10),V3 // load bytes of A at offset 16 into vector
LXVD2X (R6)(R10),V4 // load bytes of B at offset 16 into vector
VCMPEQUDCC V3,V4,V1
BGE CR6,different
ADD $-32,R9,R9 // reduce remaining size by 32
ADD $32,R5,R5 // increment to next 32 bytes of A
ADD $32,R6,R6 // increment to next 32 bytes of B
CMPU R9,$0
BNE rem // loop to rem if the remainder is not 0
BEQ CR2,equal // remainder is zero, jump to equal if len(A)==len(B)
BLT CR2,less // jump to less if len(A)<len(B)
BR greater // jump to greater otherwise
rem:
MOVD R9,R8
ANDCC $24,R8,R9 // Any 8 byte chunks? ANDCC $24,R8,R9 // Any 8 byte chunks?
BEQ leftover // and result is 0 BEQ leftover // and result is 0
BR setup8a
different:
#ifdef GOARCH_ppc64le
MOVD $byteswap<>+00(SB), R16
LXVD2X (R16)(R0),SWAP // Set up swap string
VPERM V3,V3,SWAP,V3
VPERM V4,V4,SWAP,V4
#endif
MFVSRD VS35,R16 // move upper doublewords of A and B into GPR for comparison

MFVSRD VS36,R10
CMPU R16,R10
BEQ lower
BGT greater
MOVD $-1,R3 // return value if A < B
RET
lower:
VSLDOI $8,V3,V3,V3 // move lower doublewords of A and B into GPR for comparison
MFVSRD VS35,R16
VSLDOI $8,V4,V4,V4
MFVSRD VS36,R10
CMPU R16,R10
BGT greater
MOVD $-1,R3 // return value if A < B
RET
setup8a: setup8a:
SRADCC $3,R9,R9 // get the 8 byte count SRADCC $3,R8,R9 // get the 8 byte count
BEQ leftover // shifted value is 0 BEQ leftover // shifted value is 0
CMPU R8,$8 // optimize 8byte move
BEQ size8
CMPU R8,$16
BEQ size16
MOVD R9,CTR // loop count for doublewords MOVD R9,CTR // loop count for doublewords
loop8: loop8:
MOVDBR (R5+R0),R9 // doublewords to compare #ifdef GOARCH_ppc64le
MOVDBR (R5+R0),R16 // doublewords to compare
MOVDBR (R6+R0),R10 // LE compare order MOVDBR (R6+R0),R10 // LE compare order
#else
MOVD (R5+R0),R16 // doublewords to compare
MOVD (R6+R0),R10 // BE compare order
#endif
ADD $8,R5 ADD $8,R5
ADD $8,R6 ADD $8,R6
CMPU R9,R10 // match? CMPU R16,R10 // match?
BC 8,2,loop8 // bt ctr <> 0 && cr BC 8,2,loop8 // bt ctr <> 0 && cr
BGT greater BGT greater
BLT less BLT less
leftover: leftover:
ANDCC $7,R8,R9 // check for leftover bytes ANDCC $7,R8,R9 // check for leftover bytes
MOVD R9,CTR // save the ctr BEQ zeroremainder
BNE simple // leftover bytes
BC 12,10,equal // test CR2 for length comparison
BC 12,8,less
BR greater
simplecheck: simplecheck:
CMP R8,$0 // remaining compare length 0 MOVD R0,R14
BNE simple // do simple compare CMP R9,$4 // process 4 bytes
BC 12,10,equal // test CR2 for length comparison BLT halfword
BC 12,8,less // 1st len < 2nd len, result less #ifdef GOARCH_ppc64le
BR greater // 1st len > 2nd len must be greater MOVWBR (R5)(R14),R10
simple: MOVWBR (R6)(R14),R11
MOVBZ 0(R5), R9 // get byte from 1st operand #else
ADD $1,R5 MOVWZ (R5)(R14),R10
MOVBZ 0(R6), R10 // get byte from 2nd operand MOVWZ (R6)(R14),R11
ADD $1,R6 #endif
CMPU R9, R10 CMPU R10,R11
BC 8,2,simple // bc ctr <> 0 && cr BGT greater
BGT greater // 1st > 2nd BLT less
BLT less // 1st < 2nd ADD $-4,R9
BC 12,10,equal // test CR2 for length comparison ADD $4,R14
BC 12,9,greater // 2nd len > 1st len PCALIGN $16
BR less // must be less
cmpne: // only here is not equal halfword:
MOVDBR (R5+R16),R8 // reload in reverse order CMP R9,$2 // process 2 bytes
MOVDBR (R6+R16),R9 BLT byte
CMPU R8,R9 // compare correct endianness #ifdef GOARCH_ppc64le
BGT greater // here only if NE MOVHBR (R5)(R14),R10
less: MOVHBR (R6)(R14),R11
MOVD $-1, R3 // return value if A < B #else
MOVHZ (R5)(R14),R10
MOVHZ (R6)(R14),R11
#endif
CMPU R10,R11
BGT greater
BLT less
ADD $-2,R9
ADD $2,R14
PCALIGN $16
byte:
CMP R9,$0 // process 1 byte
BEQ skip
MOVBZ (R5)(R14),R10
MOVBZ (R6)(R14),R11
CMPU R10,R11
BGT greater
BLT less
PCALIGN $16
skip:
BEQ CR2,equal
BGT CR2,greater
less: MOVD $-1,R3 // return value if A < B
RET RET
size16:
LXVD2X (R5)(R0),V3 // load bytes of A at offset 0 into vector
LXVD2X (R6)(R0),V4 // load bytes of B at offset 0 into vector
VCMPEQUDCC V3,V4,V1
BGE CR6,different
zeroremainder:
BEQ CR2,equal // remainder is zero, jump to equal if len(A)==len(B)
BLT CR2,less // jump to less if len(A)<len(B)
BR greater // jump to greater otherwise
size8:
#ifdef GOARCH_ppc64le
MOVDBR (R5+R0),R16 // doublewords to compare
MOVDBR (R6+R0),R10 // LE compare order
#else
MOVD (R5+R0),R16 // doublewords to compare
MOVD (R6+R0),R10 // BE compare order
#endif
CMPU R16,R10 // match?
BGT greater
BLT less
BGT CR2,greater // 2nd len > 1st len
BLT CR2,less // 2nd len < 1st len
equal: equal:
MOVD $0, R3 // return value if A == B MOVD $0, R3 // return value if A == B
RET RET
@ -179,100 +298,205 @@ greater:
MOVD $1,R3 // return value if A > B MOVD $1,R3 // return value if A > B
RET RET
// Do an efficient memcmp for ppc64 (BE) // Do an efficient memcmp for ppc64le/ppc64/POWER9
// R3 = a len // R3 = a len
// R4 = b len // R4 = b len
// R5 = a addr // R5 = a addr
// R6 = b addr // R6 = b addr
// On exit: // On exit:
// R3 = return value // R3 = return value
TEXT cmpbodyBE<>(SB),NOSPLIT|NOFRAME,$0-0 TEXT cmpbodyp9<>(SB),NOSPLIT|NOFRAME,$0-0
MOVD R3,R8 // set up length MOVD R3,R8 // set up length
CMP R3,R4,CR2 // unequal? CMP R3,R4,CR2 // unequal?
BC 12,8,setuplen // BLT CR2 BLT CR2,setuplen // BLT CR2
MOVD R4,R8 // use R4 for comparison len MOVD R4,R8 // use R4 for comparison len
setuplen: setuplen:
MOVD R8,CTR // set up loop counter CMP R8,$16 // optimize for size<16
CMP R8,$8 // only optimize >=8
BLT simplecheck
DCBT (R5) // cache hint
DCBT (R6)
CMP R8,$32 // optimize >= 32
MOVD R8,R9 MOVD R8,R9
BLT setup8a // 8 byte moves only BLT simplecheck
MOVD $16,R10 // set offsets to load into vectors
CMP R8,$32 // optimize for size 16-31
BLT cmp16
CMP R8,$64
BLT cmp32 // optimize for size 32-63
DCBT (R5) // optimize for size>=64
DCBT (R6) // cache hint
setup32a: MOVD $32,R11 // set offsets to load into vector
SRADCC $5,R8,R9 // number of 32 byte chunks MOVD $48,R12 // set offsets to load into vector
MOVD R9,CTR
loop32a: loop64a:// process size 64 and greater
MOVD 0(R5),R9 // doublewords to compare LXVB16X (R0)(R5),V3 // load bytes of A at offset 0 into vector
MOVD 0(R6),R10 // get 4 doublewords LXVB16X (R0)(R6),V4 // load bytes of B at offset 0 into vector
MOVD 8(R5),R14 VCMPNEBCC V3,V4,V1 // record comparison into V1
MOVD 8(R6),R15 BNE CR6,different // jump out if its different
CMPU R9,R10 // bytes equal?
BLT less // found to be less LXVB16X (R10)(R5),V3 // load bytes of A at offset 16 into vector
BGT greater // found to be greater LXVB16X (R10)(R6),V4 // load bytes of B at offset 16 into vector
MOVD 16(R5),R9 // get next pair of doublewords VCMPNEBCC V3,V4,V1
MOVD 16(R6),R10 BNE CR6,different
CMPU R14,R15 // bytes match?
BLT less // found less LXVB16X (R11)(R5),V3 // load bytes of A at offset 32 into vector
BGT greater // found greater LXVB16X (R11)(R6),V4 // load bytes of B at offset 32 into vector
MOVD 24(R5),R14 // get next pair of doublewords VCMPNEBCC V3,V4,V1
MOVD 24(R6),R15 BNE CR6,different
CMPU R9,R10 // bytes match?
BLT less // found to be less LXVB16X (R12)(R5),V3 // load bytes of A at offset 48 into vector
BGT greater // found to be greater LXVB16X (R12)(R6),V4 // load bytes of B at offset 48 into vector
ADD $32,R5 // bump up to next 32 VCMPNEBCC V3,V4,V1
ADD $32,R6 BNE CR6,different
CMPU R14,R15 // bytes match?
BC 8,2,loop32a // br ctr and cr ADD $-64,R9,R9 // reduce remaining size by 64
BLT less // with BE, byte ordering is ADD $64,R5,R5 // increment to next 64 bytes of A
BGT greater // good for compare ADD $64,R6,R6 // increment to next 64 bytes of B
ANDCC $24,R8,R9 // Any 8 byte chunks? CMPU R9,$64
BEQ leftover // and result is 0 BGE loop64a // loop back to loop64a only if there are >= 64 bytes remaining
setup8a:
SRADCC $3,R9,R9 // get the 8 byte count CMPU R9,$32
BEQ leftover // shifted value is 0 BGE cmp32 // loop to cmp32 if there are 32-64 bytes remaining
MOVD R9,CTR // loop count for doublewords CMPU R9,$16
loop8: BGE cmp16 // loop to cmp16 if there are 16-31 bytes left
MOVD (R5),R9 CMPU R9,$0
MOVD (R6),R10 BNE simplecheck // loop to simplecheck for remaining bytes
ADD $8,R5
ADD $8,R6 BEQ CR2,equal // remainder is zero, jump to equal if len(A)==len(B)
CMPU R9,R10 // match? BLT CR2,less // jump to less if len(A)<len(B)
BC 8,2,loop8 // bt ctr <> 0 && cr BR greater // jump to greater otherwise
cmp32:
LXVB16X (R0)(R5),V3 // load bytes of A at offset 0 into vector
LXVB16X (R0)(R6),V4 // load bytes of B at offset 0 into vector
VCMPNEBCC V3,V4,V1 // record comparison into V1
BNE CR6,different // jump out if its different
LXVB16X (R10)(R5),V3 // load bytes of A at offset 16 into vector
LXVB16X (R10)(R6),V4 // load bytes of B at offset 16 into vector
VCMPNEBCC V3,V4,V1
BNE CR6,different
ADD $-32,R9,R9 // reduce remaining size by 32
ADD $32,R5,R5 // increment to next 32 bytes of A
ADD $32,R6,R6 // increment to next 32 bytes of B
CMPU R9,$16 // loop to cmp16 if there are 16-31 bytes left
BGE cmp16
CMPU R9,$0
BNE simplecheck // loop to simplecheck for remainder bytes
BEQ CR2,equal // remainder is zero, jump to equal if len(A)==len(B)
BLT CR2,less // jump to less if len(A)<len(B)
BR greater // jump to greater otherwise
different:
MFVSRD VS35,R16 // move upper doublewords of A and B into GPR for comparison
MFVSRD VS36,R10
CMPU R16,R10
BEQ lower
BGT greater
MOVD $-1,R3 // return value if A < B
RET
lower:
MFVSRLD VS35,R16 // next move lower doublewords of A and B into GPR for comparison
MFVSRLD VS36,R10
CMPU R16,R10
BGT greater
MOVD $-1,R3 // return value if A < B
RET
greater:
MOVD $1,R3 // return value if A > B
RET
cmp16:
ANDCC $16,R9,R31
BEQ tail
LXVB16X (R0)(R5),V3 // load bytes of A at offset 16 into vector
LXVB16X (R0)(R6),V4 // load bytes of B at offset 16 into vector
VCMPEQUDCC V3,V4,V1
BGE CR6,different
ADD $16,R5
ADD $16,R6
tail:
ANDCC $15,R9 // Load the last 16 bytes (we know there are at least 32b)
BEQ end
ADD R9,R5
ADD R9,R6
MOVD $-16,R10
LXVB16X (R10)(R5),V3 // load bytes of A at offset 16 into vector
LXVB16X (R10)(R6),V4 // load bytes of B at offset 16 into vector
VCMPEQUDCC V3,V4,V1
BGE CR6,different
end:
BEQ CR2,equal // remainder is zero, jump to equal if len(A)==len(B)
BLT CR2,less // jump to less if BLT CR2 that is, len(A)<len(B)
BR greater // jump to greater otherwise
simplecheck:
MOVD $0,R14 // process 8 bytes
CMP R9,$8
BLT word
#ifdef GOARCH_ppc64le
MOVDBR (R5+R14),R10
MOVDBR (R6+R14),R11
#else
MOVD (R5+R14),R10
MOVD (R6+R14),R11
#endif
CMPU R10,R11
BGT greater BGT greater
BLT less BLT less
leftover: ADD $8,R14
ANDCC $7,R8,R9 // check for leftover bytes ADD $-8,R9
MOVD R9,CTR // save the ctr PCALIGN $16
BNE simple // leftover bytes word:
BC 12,10,equal // test CR2 for length comparison CMP R9,$4 // process 4 bytes
BC 12,8,less BLT halfword
BR greater #ifdef GOARCH_ppc64le
simplecheck: MOVWBR (R5+R14),R10
CMP R8,$0 // remaining compare length 0 MOVWBR (R6+R14),R11
BNE simple // do simple compare #else
BC 12,10,equal // test CR2 for length comparison MOVWZ (R5+R14),R10
BC 12,8,less // 1st len < 2nd len, result less MOVWZ (R6+R14),R11
BR greater // same len, must be equal #endif
simple: CMPU R10,R11
MOVBZ 0(R5),R9 // get byte from 1st operand BGT greater
ADD $1,R5 BLT less
MOVBZ 0(R6),R10 // get byte from 2nd operand ADD $4,R14
ADD $1,R6 ADD $-4,R9
CMPU R9,R10 PCALIGN $16
BC 8,2,simple // bc ctr <> 0 && cr halfword:
BGT greater // 1st > 2nd CMP R9,$2 // process 2 bytes
BLT less // 1st < 2nd BLT byte
BC 12,10,equal // test CR2 for length comparison #ifdef GOARCH_ppc64le
BC 12,9,greater // 2nd len > 1st len MOVHBR (R5+R14),R10
MOVHBR (R6+R14),R11
#else
MOVHZ (R5+R14),R10
MOVHZ (R6+R14),R11
#endif
CMPU R10,R11
BGT greater
BLT less
ADD $2,R14
ADD $-2,R9
PCALIGN $16
byte:
CMP R9,$0 // process 1 byte
BEQ skip
MOVBZ (R5+R14),R10
MOVBZ (R6+R14),R11
CMPU R10,R11
BGT greater
BLT less
PCALIGN $16
skip:
BEQ CR2,equal
BGT CR2,greater
less: less:
MOVD $-1,R3 // return value if A < B MOVD $-1,R3 // return value if A < B
RET RET
equal: equal:
MOVD $0, R3 // return value if A == B MOVD $0, R3 // return value if A == B
RET RET
greater:
MOVD $1, R3 // return value if A > B
RET
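Both cmpbody variants implement the contract of runtime·cmpstring and bytes.Compare: -1, 0, or 1 depending on the first differing byte, with length breaking ties. A tiny Go-level reference for the semantics the assembly must preserve:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Println(bytes.Compare([]byte("abc"), []byte("abd")))  // -1: differs at last byte
	fmt.Println(bytes.Compare([]byte("abc"), []byte("abc")))  //  0: equal
	fmt.Println(bytes.Compare([]byte("abcd"), []byte("abc"))) //  1: common prefix, a is longer
}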

View file

@ -5,161 +5,179 @@
#include "go_asm.h" #include "go_asm.h"
#include "textflag.h" #include "textflag.h"
TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-56 TEXT ·Compare<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-56
MOV a_base+0(FP), X5 #ifndef GOEXPERIMENT_regabiargs
MOV a_len+8(FP), X6 MOV a_base+0(FP), X10
MOV b_base+24(FP), X7 MOV a_len+8(FP), X11
MOV b_len+32(FP), X8 MOV b_base+24(FP), X12
MOV $ret+48(FP), X9 MOV b_len+32(FP), X13
MOV $ret+48(FP), X14
#else
// X10 = a_base
// X11 = a_len
// X12 = a_cap (unused)
// X13 = b_base (want in X12)
// X14 = b_len (want in X13)
// X15 = b_cap (unused)
MOV X13, X12
MOV X14, X13
#endif
JMP compare<>(SB) JMP compare<>(SB)
TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-40 TEXT runtime·cmpstring<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-40
MOV a_base+0(FP), X5 #ifndef GOEXPERIMENT_regabiargs
MOV a_len+8(FP), X6 MOV a_base+0(FP), X10
MOV b_base+16(FP), X7 MOV a_len+8(FP), X11
MOV b_len+24(FP), X8 MOV b_base+16(FP), X12
MOV $ret+32(FP), X9 MOV b_len+24(FP), X13
MOV $ret+32(FP), X14
#endif
// X10 = a_base
// X11 = a_len
// X12 = b_base
// X13 = b_len
JMP compare<>(SB) JMP compare<>(SB)
// On entry: // On entry:
// X5 points to start of a // X10 points to start of a
// X6 length of a // X11 length of a
// X7 points to start of b // X12 points to start of b
// X8 length of b // X13 length of b
// X9 points to the address to store the return value (-1/0/1) // for non-regabi X14 points to the address to store the return value (-1/0/1)
// for regabi the return value in X10
TEXT compare<>(SB),NOSPLIT|NOFRAME,$0 TEXT compare<>(SB),NOSPLIT|NOFRAME,$0
BEQ X5, X7, cmp_len BEQ X10, X12, cmp_len
MOV X6, X10 MOV X11, X5
BGE X8, X10, use_a_len // X10 = min(len(a), len(b)) BGE X13, X5, use_a_len // X5 = min(len(a), len(b))
MOV X8, X10 MOV X13, X5
use_a_len: use_a_len:
BEQZ X10, cmp_len BEQZ X5, cmp_len
MOV $32, X11 MOV $32, X6
BLT X10, X11, loop4_check BLT X5, X6, loop4_check
// Check alignment - if alignment differs we have to do one byte at a time. // Check alignment - if alignment differs we have to do one byte at a time.
AND $3, X5, X12 AND $3, X10, X7
AND $3, X7, X13 AND $3, X12, X8
BNE X12, X13, loop4_check BNE X7, X8, loop4_check
BEQZ X12, loop32_check BEQZ X7, loop32_check
// Check one byte at a time until we reach 8 byte alignment. // Check one byte at a time until we reach 8 byte alignment.
SUB X12, X10, X10 SUB X7, X5, X5
align: align:
ADD $-1, X12 ADD $-1, X7
MOVBU 0(X5), X13 MOVBU 0(X10), X8
MOVBU 0(X7), X14 MOVBU 0(X12), X9
BNE X13, X14, cmp BNE X8, X9, cmp
ADD $1, X5 ADD $1, X10
ADD $1, X7 ADD $1, X12
BNEZ X12, align BNEZ X7, align
loop32_check: loop32_check:
MOV $32, X12 MOV $32, X7
BLT X10, X12, loop16_check BLT X5, X7, loop16_check
loop32: loop32:
MOV 0(X5), X15 MOV 0(X10), X15
MOV 0(X7), X16 MOV 0(X12), X16
MOV 8(X5), X17 MOV 8(X10), X17
MOV 8(X7), X18 MOV 8(X12), X18
BEQ X15, X16, loop32a BEQ X15, X16, loop32a
JMP cmp8a JMP cmp8a
loop32a: loop32a:
BEQ X17, X18, loop32b BEQ X17, X18, loop32b
JMP cmp8b JMP cmp8b
loop32b: loop32b:
MOV 16(X5), X15 MOV 16(X10), X15
MOV 16(X7), X16 MOV 16(X12), X16
MOV 24(X5), X17 MOV 24(X10), X17
MOV 24(X7), X18 MOV 24(X12), X18
BEQ X15, X16, loop32c BEQ X15, X16, loop32c
JMP cmp8a JMP cmp8a
loop32c: loop32c:
BEQ X17, X18, loop32d BEQ X17, X18, loop32d
JMP cmp8b JMP cmp8b
loop32d: loop32d:
ADD $32, X5 ADD $32, X10
ADD $32, X7 ADD $32, X12
ADD $-32, X10 ADD $-32, X5
BGE X10, X12, loop32 BGE X5, X7, loop32
BEQZ X10, cmp_len BEQZ X5, cmp_len
loop16_check: loop16_check:
MOV $16, X11 MOV $16, X6
BLT X10, X11, loop4_check BLT X5, X6, loop4_check
loop16: loop16:
MOV 0(X5), X15 MOV 0(X10), X15
MOV 0(X7), X16 MOV 0(X12), X16
MOV 8(X5), X17 MOV 8(X10), X17
MOV 8(X7), X18 MOV 8(X12), X18
BEQ X15, X16, loop16a BEQ X15, X16, loop16a
JMP cmp8a JMP cmp8a
loop16a: loop16a:
BEQ X17, X18, loop16b BEQ X17, X18, loop16b
JMP cmp8b JMP cmp8b
loop16b: loop16b:
ADD $16, X5 ADD $16, X10
ADD $16, X7 ADD $16, X12
ADD $-16, X10 ADD $-16, X5
BGE X10, X11, loop16 BGE X5, X6, loop16
BEQZ X10, cmp_len BEQZ X5, cmp_len
loop4_check: loop4_check:
MOV $4, X11 MOV $4, X6
BLT X10, X11, loop1 BLT X5, X6, loop1
loop4: loop4:
MOVBU 0(X5), X13 MOVBU 0(X10), X8
MOVBU 0(X7), X14 MOVBU 0(X12), X9
MOVBU 1(X5), X15 MOVBU 1(X10), X15
MOVBU 1(X7), X16 MOVBU 1(X12), X16
BEQ X13, X14, loop4a BEQ X8, X9, loop4a
SLTU X14, X13, X10 SLTU X9, X8, X5
SLTU X13, X14, X11 SLTU X8, X9, X6
JMP cmp_ret JMP cmp_ret
loop4a: loop4a:
BEQ X15, X16, loop4b BEQ X15, X16, loop4b
SLTU X16, X15, X10 SLTU X16, X15, X5
SLTU X15, X16, X11 SLTU X15, X16, X6
JMP cmp_ret JMP cmp_ret
loop4b: loop4b:
MOVBU 2(X5), X21 MOVBU 2(X10), X21
MOVBU 2(X7), X22 MOVBU 2(X12), X22
MOVBU 3(X5), X23 MOVBU 3(X10), X23
MOVBU 3(X7), X24 MOVBU 3(X12), X24
BEQ X21, X22, loop4c BEQ X21, X22, loop4c
SLTU X22, X21, X10 SLTU X22, X21, X5
SLTU X21, X22, X11 SLTU X21, X22, X6
JMP cmp_ret JMP cmp_ret
loop4c: loop4c:
BEQ X23, X24, loop4d BEQ X23, X24, loop4d
SLTU X24, X23, X10 SLTU X24, X23, X5
SLTU X23, X24, X11 SLTU X23, X24, X6
JMP cmp_ret JMP cmp_ret
loop4d: loop4d:
ADD $4, X5 ADD $4, X10
ADD $4, X7 ADD $4, X12
ADD $-4, X10 ADD $-4, X5
BGE X10, X11, loop4 BGE X5, X6, loop4
loop1: loop1:
BEQZ X10, cmp_len BEQZ X5, cmp_len
MOVBU 0(X5), X13 MOVBU 0(X10), X8
MOVBU 0(X7), X14 MOVBU 0(X12), X9
BNE X13, X14, cmp BNE X8, X9, cmp
ADD $1, X5 ADD $1, X10
ADD $1, X7 ADD $1, X12
ADD $-1, X10 ADD $-1, X5
JMP loop1 JMP loop1
// Compare 8 bytes of memory in X15/X16 that are known to differ. // Compare 8 bytes of memory in X15/X16 that are known to differ.
cmp8a: cmp8a:
MOV $0xff, X19 MOV $0xff, X19
cmp8a_loop: cmp8a_loop:
AND X15, X19, X13 AND X15, X19, X8
AND X16, X19, X14 AND X16, X19, X9
BNE X13, X14, cmp BNE X8, X9, cmp
SLLI $8, X19 SLLI $8, X19
JMP cmp8a_loop JMP cmp8a_loop
@ -167,19 +185,21 @@ cmp8a_loop:
cmp8b: cmp8b:
MOV $0xff, X19 MOV $0xff, X19
cmp8b_loop: cmp8b_loop:
AND X17, X19, X13 AND X17, X19, X8
AND X18, X19, X14 AND X18, X19, X9
BNE X13, X14, cmp BNE X8, X9, cmp
SLLI $8, X19 SLLI $8, X19
JMP cmp8b_loop JMP cmp8b_loop
cmp_len: cmp_len:
MOV X6, X13 MOV X11, X8
MOV X8, X14 MOV X13, X9
cmp: cmp:
SLTU X14, X13, X10 SLTU X9, X8, X5
SLTU X13, X14, X11 SLTU X8, X9, X6
cmp_ret: cmp_ret:
SUB X10, X11, X12 SUB X5, X6, X10
MOV X12, (X9) #ifndef GOEXPERIMENT_regabiargs
MOV X10, (X14)
#endif
RET RET

Some files were not shown because too many files have changed in this diff.