author    Ian Lance Taylor <iant@google.com>  2016-02-03 21:58:02 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2016-02-03 21:58:02 +0000
commit    f98dd1a338867a408f7c72d73fbad7fe7fc93e3a
tree      2f8da9862a9c1fe0df138917f997b03439c02773 /libgo/go/reflect
parent    b081ed4efc144da0c45a6484aebfd10e0eb9fda3
libgo: Update to go1.6rc1.
Reviewed-on: https://go-review.googlesource.com/19200
From-SVN: r233110
Diffstat (limited to 'libgo/go/reflect')
-rw-r--r--  libgo/go/reflect/all_test.go  | 238
-rw-r--r--  libgo/go/reflect/deepequal.go |  87
-rw-r--r--  libgo/go/reflect/type.go      |  29
-rw-r--r--  libgo/go/reflect/value.go     |  22
4 files changed, 345 insertions, 31 deletions
diff --git a/libgo/go/reflect/all_test.go b/libgo/go/reflect/all_test.go
index 33ee9ed..595d690 100644
--- a/libgo/go/reflect/all_test.go
+++ b/libgo/go/reflect/all_test.go
@@ -10,6 +10,7 @@ import (
"flag"
"fmt"
"io"
+ "math"
"math/rand"
"os"
. "reflect"
@@ -647,6 +648,8 @@ var (
fn3 = func() { fn1() } // Not nil.
)
+type self struct{}
+
var deepEqualTests = []DeepEqualTest{
// Equalities
{nil, nil, true},
@@ -681,6 +684,13 @@ var deepEqualTests = []DeepEqualTest{
{fn1, fn3, false},
{fn3, fn3, false},
{[][]int{{1}}, [][]int{{2}}, false},
+ {math.NaN(), math.NaN(), false},
+ {&[1]float64{math.NaN()}, &[1]float64{math.NaN()}, false},
+ {&[1]float64{math.NaN()}, self{}, true},
+ {[]float64{math.NaN()}, []float64{math.NaN()}, false},
+ {[]float64{math.NaN()}, self{}, true},
+ {map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false},
+ {map[float64]float64{math.NaN(): 1}, self{}, true},
// Nil vs empty: not the same.
{[]int{}, []int(nil), false},
@@ -702,6 +712,9 @@ var deepEqualTests = []DeepEqualTest{
func TestDeepEqual(t *testing.T) {
for _, test := range deepEqualTests {
+ if test.b == (self{}) {
+ test.b = test.a
+ }
if r := DeepEqual(test.a, test.b); r != test.eq {
t.Errorf("DeepEqual(%v, %v) = %v, want %v", test.a, test.b, r, test.eq)
}
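The self sentinel above lets a table entry compare a value to itself, exercising the shortcut rules added in this release. A minimal sketch of what the NaN cases verify (hypothetical snippet; assumes math and reflect are imported):

	nan := math.NaN()
	s := []float64{nan}
	reflect.DeepEqual(s, s)              // true: same slice, the shortcut applies
	reflect.DeepEqual(s, []float64{nan}) // false: NaN != NaN, element by element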
@@ -2783,14 +2796,27 @@ func TestSetBytes(t *testing.T) {
type Private struct {
x int
y **int
+ Z int
}
func (p *Private) m() {
}
+type private struct {
+ Z int
+ z int
+ S string
+ A [1]Private
+ T []Private
+}
+
+func (p *private) P() {
+}
+
type Public struct {
X int
Y **int
+ private
}
func (p *Public) M() {
@@ -2798,17 +2824,30 @@ func (p *Public) M() {
func TestUnexported(t *testing.T) {
var pub Public
+ pub.S = "S"
+ pub.T = pub.A[:]
v := ValueOf(&pub)
isValid(v.Elem().Field(0))
isValid(v.Elem().Field(1))
+ isValid(v.Elem().Field(2))
isValid(v.Elem().FieldByName("X"))
isValid(v.Elem().FieldByName("Y"))
+ isValid(v.Elem().FieldByName("Z"))
isValid(v.Type().Method(0).Func)
+ m, _ := v.Type().MethodByName("M")
+ isValid(m.Func)
+ m, _ = v.Type().MethodByName("P")
+ isValid(m.Func)
isNonNil(v.Elem().Field(0).Interface())
isNonNil(v.Elem().Field(1).Interface())
+ isNonNil(v.Elem().Field(2).Field(2).Index(0))
isNonNil(v.Elem().FieldByName("X").Interface())
isNonNil(v.Elem().FieldByName("Y").Interface())
+ isNonNil(v.Elem().FieldByName("Z").Interface())
+ isNonNil(v.Elem().FieldByName("S").Index(0).Interface())
isNonNil(v.Type().Method(0).Func.Interface())
+ m, _ = v.Type().MethodByName("P")
+ isNonNil(m.Func.Interface())
var priv Private
v = ValueOf(&priv)
@@ -2824,6 +2863,170 @@ func TestUnexported(t *testing.T) {
shouldPanic(func() { v.Type().Method(0).Func.Interface() })
}
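These assertions encode the read-side rule: an exported field promoted through an unexported embedded field can still be extracted with Interface, while an unexported field cannot. A hypothetical sketch with invented types:

	type inner struct {
		S string // exported, promoted through the embedding below
		s string // unexported
	}
	type Outer struct{ inner }

	v := reflect.ValueOf(Outer{inner{S: "ok", s: "no"}})
	v.FieldByName("S").Interface() // "ok", despite the unexported path
	// v.FieldByName("s").Interface() // would panic: unexported field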
+func TestSetPanic(t *testing.T) {
+ ok := func(f func()) { f() }
+ bad := shouldPanic
+ clear := func(v Value) { v.Set(Zero(v.Type())) }
+
+ type t0 struct {
+ W int
+ }
+
+ type t1 struct {
+ Y int
+ t0
+ }
+
+ type T2 struct {
+ Z int
+ namedT0 t0
+ }
+
+ type T struct {
+ X int
+ t1
+ T2
+ NamedT1 t1
+ NamedT2 T2
+ namedT1 t1
+ namedT2 T2
+ }
+
+ // not addressable
+ v := ValueOf(T{})
+ bad(func() { clear(v.Field(0)) }) // .X
+ bad(func() { clear(v.Field(1)) }) // .t1
+ bad(func() { clear(v.Field(1).Field(0)) }) // .t1.Y
+ bad(func() { clear(v.Field(1).Field(1)) }) // .t1.t0
+ bad(func() { clear(v.Field(1).Field(1).Field(0)) }) // .t1.t0.W
+ bad(func() { clear(v.Field(2)) }) // .T2
+ bad(func() { clear(v.Field(2).Field(0)) }) // .T2.Z
+ bad(func() { clear(v.Field(2).Field(1)) }) // .T2.namedT0
+ bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W
+ bad(func() { clear(v.Field(3)) }) // .NamedT1
+ bad(func() { clear(v.Field(3).Field(0)) }) // .NamedT1.Y
+ bad(func() { clear(v.Field(3).Field(1)) }) // .NamedT1.t0
+ bad(func() { clear(v.Field(3).Field(1).Field(0)) }) // .NamedT1.t0.W
+ bad(func() { clear(v.Field(4)) }) // .NamedT2
+ bad(func() { clear(v.Field(4).Field(0)) }) // .NamedT2.Z
+ bad(func() { clear(v.Field(4).Field(1)) }) // .NamedT2.namedT0
+ bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W
+ bad(func() { clear(v.Field(5)) }) // .namedT1
+ bad(func() { clear(v.Field(5).Field(0)) }) // .namedT1.Y
+ bad(func() { clear(v.Field(5).Field(1)) }) // .namedT1.t0
+ bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W
+ bad(func() { clear(v.Field(6)) }) // .namedT2
+ bad(func() { clear(v.Field(6).Field(0)) }) // .namedT2.Z
+ bad(func() { clear(v.Field(6).Field(1)) }) // .namedT2.namedT0
+ bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W
+
+ // addressable
+ v = ValueOf(&T{}).Elem()
+ ok(func() { clear(v.Field(0)) }) // .X
+ bad(func() { clear(v.Field(1)) }) // .t1
+ ok(func() { clear(v.Field(1).Field(0)) }) // .t1.Y
+ bad(func() { clear(v.Field(1).Field(1)) }) // .t1.t0
+ ok(func() { clear(v.Field(1).Field(1).Field(0)) }) // .t1.t0.W
+ ok(func() { clear(v.Field(2)) }) // .T2
+ ok(func() { clear(v.Field(2).Field(0)) }) // .T2.Z
+ bad(func() { clear(v.Field(2).Field(1)) }) // .T2.namedT0
+ bad(func() { clear(v.Field(2).Field(1).Field(0)) }) // .T2.namedT0.W
+ ok(func() { clear(v.Field(3)) }) // .NamedT1
+ ok(func() { clear(v.Field(3).Field(0)) }) // .NamedT1.Y
+ bad(func() { clear(v.Field(3).Field(1)) }) // .NamedT1.t0
+ ok(func() { clear(v.Field(3).Field(1).Field(0)) }) // .NamedT1.t0.W
+ ok(func() { clear(v.Field(4)) }) // .NamedT2
+ ok(func() { clear(v.Field(4).Field(0)) }) // .NamedT2.Z
+ bad(func() { clear(v.Field(4).Field(1)) }) // .NamedT2.namedT0
+ bad(func() { clear(v.Field(4).Field(1).Field(0)) }) // .NamedT2.namedT0.W
+ bad(func() { clear(v.Field(5)) }) // .namedT1
+ bad(func() { clear(v.Field(5).Field(0)) }) // .namedT1.Y
+ bad(func() { clear(v.Field(5).Field(1)) }) // .namedT1.t0
+ bad(func() { clear(v.Field(5).Field(1).Field(0)) }) // .namedT1.t0.W
+ bad(func() { clear(v.Field(6)) }) // .namedT2
+ bad(func() { clear(v.Field(6).Field(0)) }) // .namedT2.Z
+ bad(func() { clear(v.Field(6).Field(1)) }) // .namedT2.namedT0
+ bad(func() { clear(v.Field(6).Field(1).Field(0)) }) // .namedT2.namedT0.W
+}
+
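The table above pins down the write-side rule: a field is settable only if it is addressable and every non-embedded step on its path is exported; unexported embedded fields may be traversed to reach promoted exported fields. A minimal sketch (invented types):

	type t0 struct{ W int }
	type T struct {
		X int
		t0 // unexported embedded field
	}

	v := reflect.ValueOf(&T{}).Elem()
	v.Field(0).CanSet()          // true: X is exported
	v.Field(1).CanSet()          // false: the embedded t0 itself is unexported
	v.Field(1).Field(0).CanSet() // true: W is promoted through the embedding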
+type timp int
+
+func (t timp) W() {}
+func (t timp) Y() {}
+func (t timp) w() {}
+func (t timp) y() {}
+
+func TestCallPanic(t *testing.T) {
+ type t0 interface {
+ W()
+ w()
+ }
+ type T1 interface {
+ Y()
+ y()
+ }
+ type T2 struct {
+ T1
+ t0
+ }
+ type T struct {
+ t0 // 0
+ T1 // 1
+
+ NamedT0 t0 // 2
+ NamedT1 T1 // 3
+ NamedT2 T2 // 4
+
+ namedT0 t0 // 5
+ namedT1 T1 // 6
+ namedT2 T2 // 7
+ }
+ ok := func(f func()) { f() }
+ bad := shouldPanic
+ call := func(v Value) { v.Call(nil) }
+
+ i := timp(0)
+ v := ValueOf(T{i, i, i, i, T2{i, i}, i, i, T2{i, i}})
+ ok(func() { call(v.Field(0).Method(0)) }) // .t0.W
+ ok(func() { call(v.Field(0).Elem().Method(0)) }) // .t0.W
+ bad(func() { call(v.Field(0).Method(1)) }) // .t0.w
+ bad(func() { call(v.Field(0).Elem().Method(2)) }) // .t0.w
+ ok(func() { call(v.Field(1).Method(0)) }) // .T1.Y
+ ok(func() { call(v.Field(1).Elem().Method(0)) }) // .T1.Y
+ bad(func() { call(v.Field(1).Method(1)) }) // .T1.y
+ bad(func() { call(v.Field(1).Elem().Method(2)) }) // .T1.y
+
+ ok(func() { call(v.Field(2).Method(0)) }) // .NamedT0.W
+ ok(func() { call(v.Field(2).Elem().Method(0)) }) // .NamedT0.W
+ bad(func() { call(v.Field(2).Method(1)) }) // .NamedT0.w
+ bad(func() { call(v.Field(2).Elem().Method(2)) }) // .NamedT0.w
+
+ ok(func() { call(v.Field(3).Method(0)) }) // .NamedT1.Y
+ ok(func() { call(v.Field(3).Elem().Method(0)) }) // .NamedT1.Y
+ bad(func() { call(v.Field(3).Method(1)) }) // .NamedT1.y
+ bad(func() { call(v.Field(3).Elem().Method(3)) }) // .NamedT1.y
+
+ ok(func() { call(v.Field(4).Field(0).Method(0)) }) // .NamedT2.T1.Y
+ ok(func() { call(v.Field(4).Field(0).Elem().Method(0)) }) // .NamedT2.T1.W
+ ok(func() { call(v.Field(4).Field(1).Method(0)) }) // .NamedT2.t0.W
+ ok(func() { call(v.Field(4).Field(1).Elem().Method(0)) }) // .NamedT2.t0.W
+
+ bad(func() { call(v.Field(5).Method(0)) }) // .namedT0.W
+ bad(func() { call(v.Field(5).Elem().Method(0)) }) // .namedT0.W
+ bad(func() { call(v.Field(5).Method(1)) }) // .namedT0.w
+ bad(func() { call(v.Field(5).Elem().Method(2)) }) // .namedT0.w
+
+ bad(func() { call(v.Field(6).Method(0)) }) // .namedT1.Y
+ bad(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.Y
+ bad(func() { call(v.Field(6).Method(0)) }) // .namedT1.y
+ bad(func() { call(v.Field(6).Elem().Method(0)) }) // .namedT1.y
+
+ bad(func() { call(v.Field(7).Field(0).Method(0)) }) // .namedT2.T1.Y
+ bad(func() { call(v.Field(7).Field(0).Elem().Method(0)) }) // .namedT2.T1.W
+ bad(func() { call(v.Field(7).Field(1).Method(0)) }) // .namedT2.t0.W
+ bad(func() { call(v.Field(7).Field(1).Elem().Method(0)) }) // .namedT2.t0.W
+}
+
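The call-side rule mirrors this: a method is callable through reflection only if the receiver value was not obtained from an unexported field. A hypothetical sketch, using time.Duration as a ready-made fmt.Stringer:

	type S struct {
		Named  fmt.Stringer
		hidden fmt.Stringer
	}

	v := reflect.ValueOf(S{Named: time.Second, hidden: time.Second})
	v.Field(0).MethodByName("String").Call(nil) // ok: reached via an exported field
	// v.Field(1).MethodByName("String").Call(nil) // would panic: unexported field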
func shouldPanic(f func()) {
defer func() {
if recover() == nil {
@@ -4786,3 +4989,38 @@ func TestPtrToMethods(t *testing.T) {
t.Fatal("does not implement Stringer, but should")
}
}
+
+func TestMapAlloc(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("skipping on gccgo until we have escape analysis")
+ }
+ m := ValueOf(make(map[int]int, 10))
+ k := ValueOf(5)
+ v := ValueOf(7)
+ allocs := testing.AllocsPerRun(100, func() {
+ m.SetMapIndex(k, v)
+ })
+ if allocs > 0.5 {
+ t.Errorf("allocs per map assignment: want 0 got %f", allocs)
+ }
+}
+
+func TestChanAlloc(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("skipping on gccgo until we have escape analysis")
+ }
+ // Note: for a chan int, the return Value must be allocated, so we
+ // use a chan *int instead.
+ c := ValueOf(make(chan *int, 1))
+ v := ValueOf(new(int))
+ allocs := testing.AllocsPerRun(100, func() {
+ c.Send(v)
+ _, _ = c.Recv()
+ })
+ if allocs < 0.5 || allocs > 1.5 {
+ t.Errorf("allocs per chan send/recv: want 1 got %f", allocs)
+ }
+ // Note: there is one allocation in reflect.recv which seems to be
+ // a limitation of escape analysis. If that is ever fixed the
+ // allocs < 0.5 condition will trigger and this test should be fixed.
+}
diff --git a/libgo/go/reflect/deepequal.go b/libgo/go/reflect/deepequal.go
index f63715c..3743e80 100644
--- a/libgo/go/reflect/deepequal.go
+++ b/libgo/go/reflect/deepequal.go
@@ -6,13 +6,15 @@
package reflect
+import "unsafe"
+
// During deepValueEqual, must keep track of checks that are
// in progress. The comparison algorithm assumes that all
// checks in progress are true when it reencounters them.
// Visited comparisons are stored in a map indexed by visit.
type visit struct {
- a1 uintptr
- a2 uintptr
+ a1 unsafe.Pointer
+ a2 unsafe.Pointer
typ Type
}
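The visited set is what lets DeepEqual terminate on cyclic data: a comparison that is already in progress is assumed to hold. A small sketch (assumes reflect is imported):

	type Node struct{ Next *Node }
	a := &Node{}
	a.Next = a // cycle
	b := &Node{}
	b.Next = b
	reflect.DeepEqual(a, b) // true: the repeated (a.Next, b.Next) pair is assumed equal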
@@ -37,19 +39,15 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool, depth int) bool {
}
if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
- addr1 := v1.UnsafeAddr()
- addr2 := v2.UnsafeAddr()
- if addr1 > addr2 {
+ addr1 := unsafe.Pointer(v1.UnsafeAddr())
+ addr2 := unsafe.Pointer(v2.UnsafeAddr())
+ if uintptr(addr1) > uintptr(addr2) {
// Canonicalize order to reduce number of entries in visited.
+ // Assumes non-moving garbage collector.
addr1, addr2 = addr2, addr1
}
- // Short circuit if references are identical ...
- if addr1 == addr2 {
- return true
- }
-
- // ... or already seen
+ // Short circuit if references are already seen.
typ := v1.Type()
v := visit{addr1, addr2, typ}
if visited[v] {
@@ -90,6 +88,9 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool, depth int) bool {
}
return deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
case Ptr:
+ if v1.Pointer() == v2.Pointer() {
+ return true
+ }
return deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
case Struct:
for i, n := 0, v1.NumField(); i < n; i++ {
@@ -109,7 +110,9 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool, depth int) bool {
return true
}
for _, k := range v1.MapKeys() {
- if !deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
+ val1 := v1.MapIndex(k)
+ val2 := v2.MapIndex(k)
+ if !val1.IsValid() || !val2.IsValid() || !deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
return false
}
}
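The IsValid checks matter because MapIndex returns the zero Value for a key that cannot be found, and a NaN key can never be found, not even in its own map. A sketch of the cases the new tests cover:

	m1 := map[float64]float64{math.NaN(): 1}
	m2 := map[float64]float64{1: 2}
	reflect.DeepEqual(m1, m2) // false: the NaN key is unreachable, caught by IsValid
	reflect.DeepEqual(m1, m1) // true: the same-map shortcut fires first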
@@ -126,18 +129,56 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool, depth int) bool {
}
}
-// DeepEqual tests for deep equality. It uses normal == equality where
-// possible but will scan elements of arrays, slices, maps, and fields of
-// structs. In maps, keys are compared with == but elements use deep
-// equality. DeepEqual correctly handles recursive types. Functions are equal
-// only if they are both nil.
-// An empty slice is not equal to a nil slice.
-func DeepEqual(a1, a2 interface{}) bool {
- if a1 == nil || a2 == nil {
- return a1 == a2
+// DeepEqual reports whether x and y are ``deeply equal,'' defined as follows.
+// Two values of identical type are deeply equal if one of the following cases applies.
+// Values of distinct types are never deeply equal.
+//
+// Array values are deeply equal when their corresponding elements are deeply equal.
+//
+// Struct values are deeply equal if their corresponding fields,
+// both exported and unexported, are deeply equal.
+//
+// Func values are deeply equal if both are nil; otherwise they are not deeply equal.
+//
+// Interface values are deeply equal if they hold deeply equal concrete values.
+//
+// Map values are deeply equal if they are the same map object
+// or if they have the same length and their corresponding keys
+// (matched using Go equality) map to deeply equal values.
+//
+// Pointer values are deeply equal if they are equal using Go's == operator
+// or if they point to deeply equal values.
+//
+// Slice values are deeply equal when all of the following are true:
+// they are both nil or both non-nil, they have the same length,
+// and either they point to the same initial entry of the same underlying array
+// (that is, &x[0] == &y[0]) or their corresponding elements (up to length) are deeply equal.
+// Note that a non-nil empty slice and a nil slice (for example, []byte{} and []byte(nil))
+// are not deeply equal.
+//
+// Other values - numbers, bools, strings, and channels - are deeply equal
+// if they are equal using Go's == operator.
+//
+// In general DeepEqual is a recursive relaxation of Go's == operator.
+// However, this idea is impossible to implement without some inconsistency.
+// Specifically, it is possible for a value to be unequal to itself,
+// either because it is of func type (uncomparable in general)
+// or because it is a floating-point NaN value (not equal to itself in floating-point comparison),
+// or because it is an array, struct, or interface containing
+// such a value.
+// On the other hand, pointer values are always equal to themselves,
+// even if they point at or contain such problematic values,
+// because they compare equal using Go's == operator, and that
+// is a sufficient condition to be deeply equal, regardless of content.
+// DeepEqual has been defined so that the same short-cut applies
+// to slices and maps: if x and y are the same slice or the same map,
+// they are deeply equal regardless of content.
+func DeepEqual(x, y interface{}) bool {
+ if x == nil || y == nil {
+ return x == y
}
- v1 := ValueOf(a1)
- v2 := ValueOf(a2)
+ v1 := ValueOf(x)
+ v2 := ValueOf(y)
if v1.Type() != v2.Type() {
return false
}
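One consequence of the pointer shortcut documented above: a value can be unequal to itself while a pointer to it is equal to itself. A minimal sketch:

	v := struct{ F float64 }{math.NaN()}
	p := &v
	reflect.DeepEqual(v, v) // false: fields are compared one by one, and NaN != NaN
	reflect.DeepEqual(p, p) // true: equal pointers are deeply equal regardless of content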
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index 180a364..88da632 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -756,10 +756,10 @@ func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
// A StructField describes a single field in a struct.
type StructField struct {
// Name is the field name.
+ Name string
// PkgPath is the package path that qualifies a lower case (unexported)
// field name. It is empty for upper case (exported) field names.
// See https://golang.org/ref/spec#Uniqueness_of_identifiers
- Name string
PkgPath string
Type Type // field type
@@ -1733,6 +1733,33 @@ func isReflexive(t *rtype) bool {
}
}
+// needKeyUpdate reports whether map overwrites require the key to be copied.
+func needKeyUpdate(t *rtype) bool {
+ switch t.Kind() {
+ case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
+ return false
+ case Float32, Float64, Complex64, Complex128, Interface, String:
+ // Float keys can be updated from +0 to -0.
+ // String keys can be updated to use a smaller backing store.
+ // Interfaces might have floats or strings in them.
+ return true
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return needKeyUpdate(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if needKeyUpdate(f.typ) {
+ return true
+ }
+ }
+ return false
+ default:
+ // Func, Map, Slice, Invalid
+ panic("needKeyUpdate called on non-key type " + t.String())
+ }
+}
+
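For float keys this is observable: +0 and -0 compare equal but differ in representation, so an overwrite replaces the stored key. A sketch, assuming the runtime honors needKeyUpdate as described:

	m := map[float64]string{}
	m[0.0] = "plus zero"
	m[math.Copysign(0, -1)] = "minus zero" // overwrites: -0 == +0
	for k := range m {
		_ = math.Signbit(k) // true: the stored key was updated to -0
	}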
// Make sure these routines stay in sync with ../../runtime/hashmap.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
diff --git a/libgo/go/reflect/value.go b/libgo/go/reflect/value.go
index 8374370..0f0eb84 100644
--- a/libgo/go/reflect/value.go
+++ b/libgo/go/reflect/value.go
@@ -143,7 +143,7 @@ func unpackEface(i interface{}) Value {
if ifaceIndir(t) {
f |= flagIndir
}
- return Value{t, unsafe.Pointer(e.word), f}
+ return Value{t, e.word, f}
}
// A ValueError occurs when a Value method is invoked on
@@ -507,7 +507,7 @@ func storeRcvr(v Value, p unsafe.Pointer) {
if t.Kind() == Interface {
// the interface data word becomes the receiver word
iface := (*nonEmptyInterface)(v.ptr)
- *(*unsafe.Pointer)(p) = unsafe.Pointer(iface.word)
+ *(*unsafe.Pointer)(p) = iface.word
} else if v.flag&flagIndir != 0 && !ifaceIndir(t) {
*(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
} else {
@@ -1958,11 +1958,10 @@ func ValueOf(i interface{}) Value {
return Value{}
}
- // TODO(rsc): Eliminate this terrible hack.
- // In the call to unpackEface, i.typ doesn't escape,
- // and i.word is an integer. So it looks like
- // i doesn't escape. But really it does,
- // because i.word is actually a pointer.
+ // TODO: Maybe allow contents of a Value to live on the stack.
+ // For now we make the contents always escape to the heap. It
+ // makes life easier in a few places (see chanrecv/mapassign
+ // comment below).
escapes(i)
return unpackEface(i)
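For reference, the escapes helper reads roughly as follows in the upstream Go sources (the libgo copy may differ in detail): it conditionally stores its argument in a global, which is enough to defeat the compiler's escape analysis.

	var dummy struct {
		b bool
		x interface{}
	}

	func escapes(x interface{}) {
		if dummy.b {
			dummy.x = x // never executed, but the compiler must assume it may be
		}
	}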
@@ -2318,6 +2317,14 @@ func chancap(ch unsafe.Pointer) int
func chanclose(ch unsafe.Pointer)
func chanlen(ch unsafe.Pointer) int
+// Note: some of the noescape annotations below are technically a lie,
+// but safe in the context of this package. Functions like chansend
+// and mapassign don't escape the referent, but may escape anything
+// the referent points to (they do shallow copies of the referent).
+// It is safe in this package because the referent may only point
+// to something a Value may point to, and that is always in the heap
+// (due to the escapes() call in ValueOf).
+
//go:noescape
func chanrecv(t *rtype, ch unsafe.Pointer, nb bool, val unsafe.Pointer) (selected, received bool)
@@ -2330,6 +2337,7 @@ func makemap(t *rtype) (m unsafe.Pointer)
//go:noescape
func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
+//go:noescape
func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
//go:noescape