author    Ian Lance Taylor <iant@golang.org>  2017-09-14 17:11:35 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2017-09-14 17:11:35 +0000
commit    bc998d034f45d1828a8663b2eed928faf22a7d01 (patch)
tree      8d262a22ca7318f4bcd64269fe8fe9e45bcf8d0f /libgo/go/reflect
parent    a41a6142df74219f596e612d3a7775f68ca6e96f (diff)
libgo: update to go1.9
Reviewed-on: https://go-review.googlesource.com/63753
From-SVN: r252767
Diffstat (limited to 'libgo/go/reflect')
-rw-r--r--  libgo/go/reflect/all_test.go   292
-rw-r--r--  libgo/go/reflect/deepequal.go    6
-rw-r--r--  libgo/go/reflect/set_test.go    18
-rw-r--r--  libgo/go/reflect/type.go       429
-rw-r--r--  libgo/go/reflect/value.go       21
5 files changed, 471 insertions, 295 deletions
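
A recurring change in all_test.go below is converting benchmarks from the plain for i := 0; i < b.N; i++ loop to b.RunParallel, so the reflect call under test is exercised from several goroutines. A minimal sketch of that pattern, assuming a file ending in _test.go; the benchmark name and struct literal are placeholders, with reflect.New standing in for size.fv.Call, t.FieldByName, v.Interface and the other benchmarked calls:

package demo // in a file ending in _test.go

import (
	"reflect"
	"testing"
)

func BenchmarkNewParallel(b *testing.B) {
	typ := reflect.TypeOf(struct{ X int }{})
	b.RunParallel(func(pb *testing.PB) {
		// Each goroutine runs this loop until the benchmark's total
		// iteration budget (b.N) is exhausted.
		for pb.Next() {
			reflect.New(typ)
		}
	})
}
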
diff --git a/libgo/go/reflect/all_test.go b/libgo/go/reflect/all_test.go
index 6ac3352..7364673 100644
--- a/libgo/go/reflect/all_test.go
+++ b/libgo/go/reflect/all_test.go
@@ -1576,9 +1576,11 @@ func BenchmarkCallArgCopy(b *testing.B) {
args := []Value{size.arg}
b.SetBytes(int64(size.arg.Len()))
b.ResetTimer()
- for i := 0; i < b.N; i++ {
- size.fv.Call(args)
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ size.fv.Call(args)
+ }
+ })
}
name := fmt.Sprintf("size=%v", size.arg.Len())
b.Run(name, bench)
@@ -2559,6 +2561,28 @@ func TestPtrToGC(t *testing.T) {
}
}
+func BenchmarkPtrTo(b *testing.B) {
+ // Construct a type with a zero ptrToThis.
+ type T struct{ int }
+ t := SliceOf(TypeOf(T{}))
+ ptrToThis := ValueOf(t).Elem().FieldByName("ptrToThis")
+ if !ptrToThis.IsValid() {
+ b.Fatalf("%v has no ptrToThis field; was it removed from rtype?", t)
+ }
+ if ptrToThis.Int() != 0 {
+ b.Fatalf("%v.ptrToThis unexpectedly nonzero", t)
+ }
+ b.ResetTimer()
+
+ // Now benchmark calling PtrTo on it: we'll have to hit the ptrMap cache on
+ // every call.
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ PtrTo(t)
+ }
+ })
+}
+
func TestAddr(t *testing.T) {
var p struct {
X, Y int
@@ -3738,7 +3762,7 @@ func checkSameType(t *testing.T, x, y interface{}) {
func TestArrayOf(t *testing.T) {
// check construction and use of type not in binary
- for _, table := range []struct {
+ tests := []struct {
n int
value func(i int) interface{}
comparable bool
@@ -3816,7 +3840,9 @@ func TestArrayOf(t *testing.T) {
comparable: true,
want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]",
},
- } {
+ }
+
+ for _, table := range tests {
at := ArrayOf(table.n, TypeOf(table.value(0)))
v := New(at).Elem()
vok := New(at).Elem()
@@ -4045,6 +4071,54 @@ func TestSliceOfGC(t *testing.T) {
}
}
+func TestStructOfFieldName(t *testing.T) {
+ // invalid field name "1nvalid"
+ shouldPanic(func() {
+ StructOf([]StructField{
+ StructField{Name: "valid", Type: TypeOf("")},
+ StructField{Name: "1nvalid", Type: TypeOf("")},
+ })
+ })
+
+ // invalid field name "+"
+ shouldPanic(func() {
+ StructOf([]StructField{
+ StructField{Name: "val1d", Type: TypeOf("")},
+ StructField{Name: "+", Type: TypeOf("")},
+ })
+ })
+
+ // no field name
+ shouldPanic(func() {
+ StructOf([]StructField{
+ StructField{Name: "", Type: TypeOf("")},
+ })
+ })
+
+ // verify creation of a struct with valid struct fields
+ validFields := []StructField{
+ StructField{
+ Name: "φ",
+ Type: TypeOf(""),
+ },
+ StructField{
+ Name: "ValidName",
+ Type: TypeOf(""),
+ },
+ StructField{
+ Name: "Val1dNam5",
+ Type: TypeOf(""),
+ },
+ }
+
+ validStruct := StructOf(validFields)
+
+ const structStr = `struct { φ string; ValidName string; Val1dNam5 string }`
+ if got, want := validStruct.String(), structStr; got != want {
+ t.Errorf("StructOf(validFields).String()=%q, want %q", got, want)
+ }
+}
+
func TestStructOf(t *testing.T) {
// check construction and use of type not in binary
fields := []StructField{
@@ -4392,7 +4466,7 @@ func TestStructOfGenericAlg(t *testing.T) {
{Name: "S1", Type: st1},
})
- for _, table := range []struct {
+ tests := []struct {
rt Type
idx []int
}{
@@ -4473,7 +4547,9 @@ func TestStructOfGenericAlg(t *testing.T) {
),
idx: []int{2},
},
- } {
+ }
+
+ for _, table := range tests {
v1 := New(table.rt).Elem()
v2 := New(table.rt).Elem()
@@ -4582,18 +4658,21 @@ func TestStructOfWithInterface(t *testing.T) {
type Iface interface {
Get() int
}
- for i, table := range []struct {
+ tests := []struct {
+ name string
typ Type
val Value
impl bool
}{
{
+ name: "StructI",
typ: TypeOf(StructI(want)),
val: ValueOf(StructI(want)),
impl: true,
},
{
- typ: PtrTo(TypeOf(StructI(want))),
+ name: "StructI",
+ typ: PtrTo(TypeOf(StructI(want))),
val: ValueOf(func() interface{} {
v := StructI(want)
return &v
@@ -4601,7 +4680,8 @@ func TestStructOfWithInterface(t *testing.T) {
impl: true,
},
{
- typ: PtrTo(TypeOf(StructIPtr(want))),
+ name: "StructIPtr",
+ typ: PtrTo(TypeOf(StructIPtr(want))),
val: ValueOf(func() interface{} {
v := StructIPtr(want)
return &v
@@ -4609,6 +4689,7 @@ func TestStructOfWithInterface(t *testing.T) {
impl: true,
},
{
+ name: "StructIPtr",
typ: TypeOf(StructIPtr(want)),
val: ValueOf(StructIPtr(want)),
impl: false,
@@ -4618,41 +4699,70 @@ func TestStructOfWithInterface(t *testing.T) {
// val: ValueOf(StructI(want)),
// impl: true,
// },
- } {
- rt := StructOf(
- []StructField{
- {
- Name: "",
+ }
+
+ for i, table := range tests {
+ for j := 0; j < 2; j++ {
+ var fields []StructField
+ if j == 1 {
+ fields = append(fields, StructField{
+ Name: "Dummy",
PkgPath: "",
- Type: table.typ,
- },
- },
- )
- rv := New(rt).Elem()
- rv.Field(0).Set(table.val)
+ Type: TypeOf(int(0)),
+ })
+ }
+ fields = append(fields, StructField{
+ Name: table.name,
+ Anonymous: true,
+ PkgPath: "",
+ Type: table.typ,
+ })
- if _, ok := rv.Interface().(Iface); ok != table.impl {
- if table.impl {
- t.Errorf("test-%d: type=%v fails to implement Iface.\n", i, table.typ)
- } else {
- t.Errorf("test-%d: type=%v should NOT implement Iface\n", i, table.typ)
+ // We currently do not correctly implement methods
+ // for anonymous fields other than the first.
+ // Therefore, for now, we expect those methods
+ // to not exist. See issues 15924 and 20824.
+ // When those issues are fixed, this test of panic
+ // should be removed.
+ if j == 1 && table.impl {
+ func() {
+ defer func() {
+ if err := recover(); err == nil {
+ t.Errorf("test-%d-%d did not panic", i, j)
+ }
+ }()
+ _ = StructOf(fields)
+ }()
+ continue
}
- continue
- }
- if !table.impl {
- continue
- }
+ rt := StructOf(fields)
+ rv := New(rt).Elem()
+ rv.Field(j).Set(table.val)
- v := rv.Interface().(Iface).Get()
- if v != want {
- t.Errorf("test-%d: x.Get()=%v. want=%v\n", i, v, want)
- }
+ if _, ok := rv.Interface().(Iface); ok != table.impl {
+ if table.impl {
+ t.Errorf("test-%d-%d: type=%v fails to implement Iface.\n", i, j, table.typ)
+ } else {
+ t.Errorf("test-%d-%d: type=%v should NOT implement Iface\n", i, j, table.typ)
+ }
+ continue
+ }
- fct := rv.MethodByName("Get")
- out := fct.Call(nil)
- if !DeepEqual(out[0].Interface(), want) {
- t.Errorf("test-%d: x.Get()=%v. want=%v\n", i, out[0].Interface(), want)
+ if !table.impl {
+ continue
+ }
+
+ v := rv.Interface().(Iface).Get()
+ if v != want {
+ t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, v, want)
+ }
+
+ fct := rv.MethodByName("Get")
+ out := fct.Call(nil)
+ if !DeepEqual(out[0].Interface(), want) {
+ t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, out[0].Interface(), want)
+ }
}
}
}
@@ -4914,16 +5024,20 @@ type B1 struct {
func BenchmarkFieldByName1(b *testing.B) {
t := TypeOf(B1{})
- for i := 0; i < b.N; i++ {
- t.FieldByName("Z")
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("Z")
+ }
+ })
}
func BenchmarkFieldByName2(b *testing.B) {
t := TypeOf(S3{})
- for i := 0; i < b.N; i++ {
- t.FieldByName("B")
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("B")
+ }
+ })
}
type R0 struct {
@@ -5006,9 +5120,11 @@ func TestEmbed(t *testing.T) {
func BenchmarkFieldByName3(b *testing.B) {
t := TypeOf(R0{})
- for i := 0; i < b.N; i++ {
- t.FieldByName("X")
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("X")
+ }
+ })
}
type S struct {
@@ -5018,9 +5134,11 @@ type S struct {
func BenchmarkInterfaceBig(b *testing.B) {
v := ValueOf(S{})
- for i := 0; i < b.N; i++ {
- v.Interface()
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v.Interface()
+ }
+ })
b.StopTimer()
}
@@ -5036,9 +5154,11 @@ func TestAllocsInterfaceBig(t *testing.T) {
func BenchmarkInterfaceSmall(b *testing.B) {
v := ValueOf(int64(0))
- for i := 0; i < b.N; i++ {
- v.Interface()
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v.Interface()
+ }
+ })
}
func TestAllocsInterfaceSmall(t *testing.T) {
@@ -5835,7 +5955,7 @@ func TestTypeOfTypeOf(t *testing.T) {
check("SliceOf", SliceOf(TypeOf(T{})))
}
-type XM struct{}
+type XM struct{ _ bool }
func (*XM) String() string { return "" }
@@ -5861,6 +5981,24 @@ func TestMapAlloc(t *testing.T) {
if allocs > 0.5 {
t.Errorf("allocs per map assignment: want 0 got %f", allocs)
}
+
+ const size = 1000
+ tmp := 0
+ val := ValueOf(&tmp).Elem()
+ allocs = testing.AllocsPerRun(100, func() {
+ mv := MakeMapWithSize(TypeOf(map[int]int{}), size)
+ // Only adding half of the capacity to not trigger re-allocations due too many overloaded buckets.
+ for i := 0; i < size/2; i++ {
+ val.SetInt(int64(i))
+ mv.SetMapIndex(val, val)
+ }
+ })
+ if allocs > 10 {
+ t.Errorf("allocs per map assignment: want at most 10 got %f", allocs)
+ }
+ // Empirical testing shows that with capacity hint single run will trigger 3 allocations and without 91. I set
+ // the threshold to 10, to not make it overly brittle if something changes in the initial allocation of the
+ // map, but to still catch a regression where we keep re-allocating in the hashmap as new entries are added.
}
func TestChanAlloc(t *testing.T) {
@@ -5984,6 +6122,8 @@ func TestTypeStrings(t *testing.T) {
{TypeOf(new(XM)).Method(0).Type, "func(*reflect_test.XM) string"},
{ChanOf(3, TypeOf(XM{})), "chan reflect_test.XM"},
{MapOf(TypeOf(int(0)), TypeOf(XM{})), "map[int]reflect_test.XM"},
+ {ArrayOf(3, TypeOf(XM{})), "[3]reflect_test.XM"},
+ {ArrayOf(3, TypeOf(struct{}{})), "[3]struct {}"},
}
for i, test := range stringTests {
@@ -6014,9 +6154,11 @@ func TestOffsetLock(t *testing.T) {
func BenchmarkNew(b *testing.B) {
v := TypeOf(XM{})
- for i := 0; i < b.N; i++ {
- New(v)
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ New(v)
+ }
+ })
}
func TestSwapper(t *testing.T) {
@@ -6091,6 +6233,7 @@ func TestSwapper(t *testing.T) {
want: []pairPtr{{5, 6, &c}, {3, 4, &b}, {1, 2, &a}},
},
}
+
for i, tt := range tests {
inStr := fmt.Sprint(tt.in)
Swapper(tt.in)(tt.i, tt.j)
@@ -6116,3 +6259,36 @@ func TestUnaddressableField(t *testing.T) {
lv.Set(rv)
})
}
+
+type Tint int
+
+type Tint2 = Tint
+
+type Talias1 struct {
+ byte
+ uint8
+ int
+ int32
+ rune
+}
+
+type Talias2 struct {
+ Tint
+ Tint2
+}
+
+func TestAliasNames(t *testing.T) {
+ t1 := Talias1{byte: 1, uint8: 2, int: 3, int32: 4, rune: 5}
+ out := fmt.Sprintf("%#v", t1)
+ want := "reflect_test.Talias1{byte:0x1, uint8:0x2, int:3, int32:4, rune:5}"
+ if out != want {
+ t.Errorf("Talias1 print:\nhave: %s\nwant: %s", out, want)
+ }
+
+ t2 := Talias2{Tint: 1, Tint2: 2}
+ out = fmt.Sprintf("%#v", t2)
+ want = "reflect_test.Talias2{Tint:1, Tint2:2}"
+ if out != want {
+ t.Errorf("Talias2 print:\nhave: %s\nwant: %s", out, want)
+ }
+}
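
TestAliasNames above exercises type aliases, new in Go 1.9 (type Tint2 = Tint): an alias is a second name for the same type, so reflect and %#v report the original type name for the embedded field. A minimal sketch of the same behaviour outside the test, with hypothetical names Celsius and Temp:

package main

import (
	"fmt"
	"reflect"
)

type Celsius int
type Temp = Celsius // alias: Temp is not a new type

func main() {
	// reflect sees through the alias and reports the defined type.
	fmt.Println(reflect.TypeOf(Temp(0)))                               // main.Celsius
	fmt.Println(reflect.TypeOf(Temp(0)) == reflect.TypeOf(Celsius(0))) // true
}
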
diff --git a/libgo/go/reflect/deepequal.go b/libgo/go/reflect/deepequal.go
index f3fd704..2fdd6a3 100644
--- a/libgo/go/reflect/deepequal.go
+++ b/libgo/go/reflect/deepequal.go
@@ -178,6 +178,12 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool, depth int) bool {
// DeepEqual has been defined so that the same short-cut applies
// to slices and maps: if x and y are the same slice or the same map,
// they are deeply equal regardless of content.
+//
+// As DeepEqual traverses the data values it may find a cycle. The
+// second and subsequent times that DeepEqual compares two pointer
+// values that have been compared before, it treats the values as
+// equal rather than examining the values to which they point.
+// This ensures that DeepEqual terminates.
func DeepEqual(x, y interface{}) bool {
if x == nil || y == nil {
return x == y
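
The comment added to deepequal.go documents why DeepEqual terminates on cyclic data: once a pair of pointers has been compared, meeting the same pair again counts as equal instead of being followed further. A minimal sketch, assuming a hypothetical self-referential node type:

package main

import (
	"fmt"
	"reflect"
)

type node struct {
	next *node
}

func main() {
	// Two one-element cycles: without the visited-pointer rule this
	// comparison would recurse forever.
	a := &node{}
	a.next = a
	b := &node{}
	b.next = b
	fmt.Println(reflect.DeepEqual(a, b)) // true
}
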
diff --git a/libgo/go/reflect/set_test.go b/libgo/go/reflect/set_test.go
index bc35c78..7c39623 100644
--- a/libgo/go/reflect/set_test.go
+++ b/libgo/go/reflect/set_test.go
@@ -7,6 +7,7 @@ package reflect_test
import (
"bytes"
"go/ast"
+ "go/token"
"io"
. "reflect"
"testing"
@@ -172,6 +173,23 @@ var implementsTests = []struct {
{new(bytes.Buffer), new(io.Reader), false},
{new(*bytes.Buffer), new(io.ReaderAt), false},
{new(*ast.Ident), new(ast.Expr), true},
+ {new(*notAnExpr), new(ast.Expr), false},
+ {new(*ast.Ident), new(notASTExpr), false},
+ {new(notASTExpr), new(ast.Expr), false},
+ {new(ast.Expr), new(notASTExpr), false},
+ {new(*notAnExpr), new(notASTExpr), true},
+}
+
+type notAnExpr struct{}
+
+func (notAnExpr) Pos() token.Pos { return token.NoPos }
+func (notAnExpr) End() token.Pos { return token.NoPos }
+func (notAnExpr) exprNode() {}
+
+type notASTExpr interface {
+ Pos() token.Pos
+ End() token.Pos
+ exprNode()
}
func TestImplements(t *testing.T) {
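
The new implementsTests entries above check Implements against an interface (notASTExpr) declared in the test package itself. The usual way to obtain an interface type for such checks is via a nil interface pointer; a minimal sketch using io.Reader and bytes.Buffer rather than the test's types:

package main

import (
	"bytes"
	"fmt"
	"io"
	"reflect"
)

func main() {
	// TypeOf of a nil *io.Reader, then Elem, yields the interface type itself.
	reader := reflect.TypeOf((*io.Reader)(nil)).Elem()
	fmt.Println(reflect.TypeOf(&bytes.Buffer{}).Implements(reader)) // true
	fmt.Println(reflect.TypeOf(bytes.Buffer{}).Implements(reader))  // false: Read has a pointer receiver
}
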
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index 97b986a..664d971 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -18,6 +18,8 @@ package reflect
import (
"strconv"
"sync"
+ "unicode"
+ "unicode/utf8"
"unsafe"
)
@@ -258,6 +260,8 @@ const (
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
+//
+// rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct {
size uintptr
ptrdata uintptr // size of memory prefix holding all pointers
@@ -516,79 +520,52 @@ func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
func (t *rtype) common() *rtype { return t }
-func (t *uncommonType) Method(i int) (m Method) {
- if t == nil || i < 0 || i >= len(t.methods) {
- panic("reflect: Method index out of range")
- }
- found := false
- for mi := range t.methods {
- if t.methods[mi].pkgPath == nil {
- if i == 0 {
- i = mi
- found = true
- break
- }
- i--
- }
- }
- if !found {
- panic("reflect: Method index out of range")
- }
+var methodCache sync.Map // map[*rtype][]method
- p := &t.methods[i]
- if p.name != nil {
- m.Name = *p.name
- }
- fl := flag(Func)
- if p.pkgPath != nil {
- m.PkgPath = *p.pkgPath
- fl |= flagStickyRO
+func (t *rtype) exportedMethods() []method {
+ methodsi, found := methodCache.Load(t)
+ if found {
+ return methodsi.([]method)
}
- mt := p.typ
- m.Type = toType(mt)
- x := new(unsafe.Pointer)
- *x = unsafe.Pointer(&p.tfn)
- m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
- m.Index = i
- return
-}
-func (t *uncommonType) NumMethod() int {
- if t == nil {
- return 0
+ ut := t.uncommon()
+ if ut == nil {
+ return nil
}
- c := 0
- for i := range t.methods {
- if t.methods[i].pkgPath == nil {
- c++
+ allm := ut.methods
+ allExported := true
+ for _, m := range allm {
+ if m.pkgPath != nil {
+ allExported = false
+ break
}
}
- return c
-}
-
-func (t *uncommonType) MethodByName(name string) (m Method, ok bool) {
- if t == nil {
- return
- }
- var p *method
- for i := range t.methods {
- p = &t.methods[i]
- if p.pkgPath == nil && p.name != nil && *p.name == name {
- return t.Method(i), true
+ var methods []method
+ if allExported {
+ methods = allm
+ } else {
+ methods = make([]method, 0, len(allm))
+ for _, m := range allm {
+ if m.pkgPath == nil {
+ methods = append(methods, m)
+ }
}
+ methods = methods[:len(methods):len(methods)]
}
- return
+
+ methodsi, _ = methodCache.LoadOrStore(t, methods)
+ return methodsi.([]method)
}
-// TODO(rsc): gc supplies these, but they are not
-// as efficient as they could be: they have commonType
-// as the receiver instead of *rtype.
func (t *rtype) NumMethod() int {
if t.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.NumMethod()
}
- return t.uncommonType.NumMethod()
+ if t.uncommonType == nil {
+ return 0 // avoid methodCache synchronization
+ }
+ return len(t.exportedMethods())
}
func (t *rtype) Method(i int) (m Method) {
@@ -596,7 +573,22 @@ func (t *rtype) Method(i int) (m Method) {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.Method(i)
}
- return t.uncommonType.Method(i)
+ methods := t.exportedMethods()
+ if i < 0 || i >= len(methods) {
+ panic("reflect: Method index out of range")
+ }
+ p := methods[i]
+ if p.name != nil {
+ m.Name = *p.name
+ }
+ fl := flag(Func)
+ mt := p.typ
+ m.Type = toType(mt)
+ x := new(unsafe.Pointer)
+ *x = unsafe.Pointer(&p.tfn)
+ m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
+ m.Index = i
+ return m
}
func (t *rtype) MethodByName(name string) (m Method, ok bool) {
@@ -604,7 +596,17 @@ func (t *rtype) MethodByName(name string) (m Method, ok bool) {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.MethodByName(name)
}
- return t.uncommonType.MethodByName(name)
+ ut := t.uncommon()
+ if ut == nil {
+ return Method{}, false
+ }
+ for i := range ut.methods {
+ p := &ut.methods[i]
+ if p.pkgPath == nil && p.name != nil && *p.name == name {
+ return t.Method(i), true
+ }
+ }
+ return Method{}, false
}
func (t *rtype) PkgPath() string {
@@ -983,12 +985,11 @@ func (t *structType) FieldByNameFunc(match func(string) bool) (result StructFiel
visited[t] = true
for i := range t.fields {
f := &t.fields[i]
- // Find name and type for field f.
+ // Find name and (for anonymous field) type for field f.
fname := *f.name
var ntyp *rtype
if f.anon() {
// Anonymous field of type T or *T.
- // Name taken from type.
ntyp = f.typ
if ntyp.Kind() == Ptr {
ntyp = ntyp.Elem().common()
@@ -1072,10 +1073,7 @@ func TypeOf(i interface{}) Type {
}
// ptrMap is the cache for PtrTo.
-var ptrMap struct {
- sync.RWMutex
- m map[*rtype]*ptrType
-}
+var ptrMap sync.Map // map[*rtype]*ptrType
// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
@@ -1089,24 +1087,8 @@ func (t *rtype) ptrTo() *rtype {
}
// Check the cache.
- ptrMap.RLock()
- if m := ptrMap.m; m != nil {
- if p := m[t]; p != nil {
- ptrMap.RUnlock()
- return &p.rtype
- }
- }
- ptrMap.RUnlock()
-
- ptrMap.Lock()
- if ptrMap.m == nil {
- ptrMap.m = make(map[*rtype]*ptrType)
- }
- p := ptrMap.m[t]
- if p != nil {
- // some other goroutine won the race and created it
- ptrMap.Unlock()
- return &p.rtype
+ if pi, ok := ptrMap.Load(t); ok {
+ return &pi.(*ptrType).rtype
}
s := "*" + *t.string
@@ -1115,9 +1097,9 @@ func (t *rtype) ptrTo() *rtype {
r, ok := canonicalType[s]
canonicalTypeLock.RUnlock()
if ok {
- ptrMap.m[t] = (*ptrType)(unsafe.Pointer(r.(*rtype)))
- ptrMap.Unlock()
- return r.(*rtype)
+ p := (*ptrType)(unsafe.Pointer(r.(*rtype)))
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
}
// Create a new ptrType starting with the description
@@ -1143,11 +1125,10 @@ func (t *rtype) ptrTo() *rtype {
pp.elem = t
q := canonicalize(&pp.rtype)
- p = (*ptrType)(unsafe.Pointer(q.(*rtype)))
+ p := (*ptrType)(unsafe.Pointer(q.(*rtype)))
- ptrMap.m[t] = p
- ptrMap.Unlock()
- return &p.rtype
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
}
// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
@@ -1396,11 +1377,8 @@ func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
return false
}
-// The lookupCache caches ChanOf, MapOf, and SliceOf lookups.
-var lookupCache struct {
- sync.RWMutex
- m map[cacheKey]*rtype
-}
+// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
+var lookupCache sync.Map // map[cacheKey]*rtype
// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
@@ -1412,48 +1390,15 @@ type cacheKey struct {
extra uintptr
}
-// cacheGet looks for a type under the key k in the lookupCache.
-// If it finds one, it returns that type.
-// If not, it returns nil with the cache locked.
-// The caller is expected to use cachePut to unlock the cache.
-func cacheGet(k cacheKey) Type {
- lookupCache.RLock()
- t := lookupCache.m[k]
- lookupCache.RUnlock()
- if t != nil {
- return t
- }
-
- lookupCache.Lock()
- t = lookupCache.m[k]
- if t != nil {
- lookupCache.Unlock()
- return t
- }
-
- if lookupCache.m == nil {
- lookupCache.m = make(map[cacheKey]*rtype)
- }
-
- return nil
-}
-
-// cachePut stores the given type in the cache, unlocks the cache,
-// and returns the type. It is expected that the cache is locked
-// because cacheGet returned nil.
-func cachePut(k cacheKey, t *rtype) Type {
- t = toType(t).common()
- lookupCache.m[k] = t
- lookupCache.Unlock()
- return t
-}
-
// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
- sync.RWMutex
- m map[uint32][]*rtype // keyed by hash calculated in FuncOf
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
+ // Elements of m are append-only and thus safe for concurrent reading.
+ m sync.Map
}
// ChanOf returns the channel type with the given direction and element type.
@@ -1466,13 +1411,12 @@ func ChanOf(dir ChanDir, t Type) Type {
// Look in cache.
ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
- if ch := cacheGet(ckey); ch != nil {
- return ch
+ if ch, ok := lookupCache.Load(ckey); ok {
+ return ch.(*rtype)
}
// This restriction is imposed by the gc compiler and the runtime.
if typ.size >= 1<<16 {
- lookupCache.Unlock()
panic("reflect.ChanOf: element size too large")
}
@@ -1481,7 +1425,6 @@ func ChanOf(dir ChanDir, t Type) Type {
var s string
switch dir {
default:
- lookupCache.Unlock()
panic("reflect.ChanOf: invalid dir")
case SendDir:
s = "chan<- " + *typ.string
@@ -1515,7 +1458,8 @@ func ChanOf(dir ChanDir, t Type) Type {
ch.uncommonType = nil
ch.ptrToThis = nil
- return cachePut(ckey, &ch.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+ return ti.(Type)
}
func ismapkey(*rtype) bool // implemented in runtime
@@ -1536,8 +1480,8 @@ func MapOf(key, elem Type) Type {
// Look in cache.
ckey := cacheKey{Map, ktyp, etyp, 0}
- if mt := cacheGet(ckey); mt != nil {
- return mt
+ if mt, ok := lookupCache.Load(ckey); ok {
+ return mt.(Type)
}
// Look in known types.
@@ -1576,7 +1520,8 @@ func MapOf(key, elem Type) Type {
mt.reflexivekey = isReflexive(ktyp)
mt.needkeyupdate = needKeyUpdate(ktyp)
- return cachePut(ckey, &mt.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ return ti.(Type)
}
// FuncOf returns the function type with the given argument and result types.
@@ -1625,25 +1570,32 @@ func FuncOf(in, out []Type, variadic bool) Type {
ft.dotdotdot = variadic
// Look in cache.
- funcLookupCache.RLock()
- for _, t := range funcLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- funcLookupCache.RUnlock()
- return t
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
}
}
- funcLookupCache.RUnlock()
// Not in cache, lock and retry.
funcLookupCache.Lock()
defer funcLookupCache.Unlock()
- if funcLookupCache.m == nil {
- funcLookupCache.m = make(map[uint32][]*rtype)
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
}
- for _, t := range funcLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- return t
+
+ addToCache := func(tt *rtype) Type {
+ var rts []*rtype
+ if rti, ok := funcLookupCache.m.Load(hash); ok {
+ rts = rti.([]*rtype)
}
+ funcLookupCache.m.Store(hash, append(rts, tt))
+ return tt
}
str := funcStr(ft)
@@ -1652,10 +1604,7 @@ func FuncOf(in, out []Type, variadic bool) Type {
ft.string = &str
ft.uncommonType = nil
ft.ptrToThis = nil
-
- funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
-
- return toType(&ft.rtype)
+ return addToCache(&ft.rtype)
}
// funcStr builds a string representation of a funcType.
@@ -1771,9 +1720,6 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
// Prepare GC data if any.
// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
- // Normally the enforced limit on pointer maps is 16 bytes,
- // but larger ones are acceptable, 33 bytes isn't too too big,
- // and it's easier to generate a pointer bitmap than a GC program.
// Note that since the key and value are known to be <= 128 bytes,
// they're guaranteed to have bitmaps instead of GC programs.
var gcdata *byte
@@ -1812,7 +1758,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
panic("reflect: unexpected GC program in MapOf")
}
kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
- for i := uintptr(0); i < ktyp.size/ptrSize; i++ {
+ for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
if (kmask[i/8]>>(i%8))&1 != 0 {
for j := uintptr(0); j < bucketSize; j++ {
word := base + j*ktyp.size/ptrSize + i
@@ -1830,7 +1776,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
panic("reflect: unexpected GC program in MapOf")
}
emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
- for i := uintptr(0); i < etyp.size/ptrSize; i++ {
+ for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
if (emask[i/8]>>(i%8))&1 != 0 {
for j := uintptr(0); j < bucketSize; j++ {
word := base + j*etyp.size/ptrSize + i
@@ -1871,8 +1817,8 @@ func SliceOf(t Type) Type {
// Look in cache.
ckey := cacheKey{Slice, typ, nil, 0}
- if slice := cacheGet(ckey); slice != nil {
- return slice
+ if slice, ok := lookupCache.Load(ckey); ok {
+ return slice.(Type)
}
// Look in known types.
@@ -1892,17 +1838,44 @@ func SliceOf(t Type) Type {
slice.uncommonType = nil
slice.ptrToThis = nil
- return cachePut(ckey, &slice.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ return ti.(Type)
}
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
- sync.RWMutex
- m map[uint32][]interface {
- common() *rtype
- } // keyed by hash calculated in StructOf
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
+ // Elements in m are append-only and thus safe for concurrent reading.
+ m sync.Map
+}
+
+// isLetter returns true if a given 'rune' is classified as a Letter.
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+// isValidFieldName checks if a string is a valid (struct) field name or not.
+//
+// According to the language spec, a field name should be an identifier.
+//
+// identifier = letter { letter | unicode_digit } .
+// letter = unicode_letter | "_" .
+func isValidFieldName(fieldName string) bool {
+ for i, c := range fieldName {
+ if i == 0 && !isLetter(c) {
+ return false
+ }
+
+ if !(isLetter(c) || unicode.IsDigit(c)) {
+ return false
+ }
+ }
+
+ return len(fieldName) > 0
}
// StructOf returns the struct type containing fields.
@@ -1930,6 +1903,12 @@ func StructOf(fields []StructField) Type {
lastzero := uintptr(0)
repr = append(repr, "struct {"...)
for i, field := range fields {
+ if field.Name == "" {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
+ }
+ if !isValidFieldName(field.Name) {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
+ }
if field.Type == nil {
panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
}
@@ -1960,30 +1939,29 @@ func StructOf(fields []StructField) Type {
} else {
name = ft.String()
}
- // TODO(sbinet) check for syntactically impossible type names?
switch f.typ.Kind() {
case Interface:
ift := (*interfaceType)(unsafe.Pointer(ft))
if len(ift.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
case Ptr:
ptr := (*ptrType)(unsafe.Pointer(ft))
if unt := ptr.uncommon(); unt != nil {
if len(unt.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
}
if unt := ptr.elem.uncommon(); unt != nil {
if len(unt.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
}
default:
if unt := ft.uncommon(); unt != nil {
if len(unt.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
}
}
@@ -2044,30 +2022,35 @@ func StructOf(fields []StructField) Type {
*typ = *prototype
typ.fields = fs
- // Look in cache
- structLookupCache.RLock()
- for _, st := range structLookupCache.m[hash] {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- structLookupCache.RUnlock()
- return t
+ // Look in cache.
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
}
}
- structLookupCache.RUnlock()
- // not in cache, lock and retry
+ // Not in cache, lock and retry.
structLookupCache.Lock()
defer structLookupCache.Unlock()
- if structLookupCache.m == nil {
- structLookupCache.m = make(map[uint32][]interface {
- common() *rtype
- })
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
}
- for _, st := range structLookupCache.m[hash] {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- return t
+
+ addToCache := func(t Type) Type {
+ var ts []Type
+ if ti, ok := structLookupCache.m.Load(hash); ok {
+ ts = ti.([]Type)
}
+ structLookupCache.m.Store(hash, append(ts, t))
+ return t
}
typ.string = &str
@@ -2172,24 +2155,19 @@ func StructOf(fields []StructField) Type {
typ.uncommonType = nil
typ.ptrToThis = nil
- structLookupCache.m[hash] = append(structLookupCache.m[hash], typ)
- return &typ.rtype
+ return addToCache(&typ.rtype)
}
func runtimeStructField(field StructField) structField {
- var name *string
- if field.Name == "" {
- t := field.Type.(*rtype)
- if t.Kind() == Ptr {
- t = t.Elem().(*rtype)
- }
- } else if field.PkgPath == "" {
- s := field.Name
- name = &s
- b0 := s[0]
- if ('a' <= b0 && b0 <= 'z') || b0 == '_' {
- panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but has no PkgPath")
- }
+ if field.PkgPath != "" {
+ panic("reflect.StructOf: StructOf does not allow unexported fields")
+ }
+
+ // Best-effort check for misuse.
+ // Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
}
offsetAnon := uintptr(0)
@@ -2197,24 +2175,18 @@ func runtimeStructField(field StructField) structField {
offsetAnon |= 1
}
- var pkgPath *string
- if field.PkgPath != "" {
- s := field.PkgPath
- pkgPath = &s
- // This could work with gccgo but we panic to be
- // compatible with gc.
- panic("reflect: creating a name with a package path is not supported")
- }
+ s := field.Name
+ name := &s
var tag *string
if field.Tag != "" {
- s := string(field.Tag)
- tag = &s
+ st := string(field.Tag)
+ tag = &st
}
return structField{
name: name,
- pkgPath: pkgPath,
+ pkgPath: nil,
typ: field.Type.common(),
tag: tag,
offsetAnon: offsetAnon,
@@ -2257,15 +2229,11 @@ const maxPtrmaskBytes = 2048
// ArrayOf panics.
func ArrayOf(count int, elem Type) Type {
typ := elem.(*rtype)
- // call SliceOf here as it calls cacheGet/cachePut.
- // ArrayOf also calls cacheGet/cachePut and thus may modify the state of
- // the lookupCache mutex.
- slice := SliceOf(elem)
// Look in cache.
ckey := cacheKey{Array, typ, nil, uintptr(count)}
- if array := cacheGet(ckey); array != nil {
- return array
+ if array, ok := lookupCache.Load(ckey); ok {
+ return array.(Type)
}
// Look in known types.
@@ -2287,9 +2255,11 @@ func ArrayOf(count int, elem Type) Type {
array.elem = typ
array.ptrToThis = nil
- max := ^uintptr(0) / typ.size
- if uintptr(count) > max {
- panic("reflect.ArrayOf: array size would exceed virtual address space")
+ if typ.size > 0 {
+ max := ^uintptr(0) / typ.size
+ if uintptr(count) > max {
+ panic("reflect.ArrayOf: array size would exceed virtual address space")
+ }
}
array.size = typ.size * uintptr(count)
if count > 0 && typ.ptrdata != 0 {
@@ -2299,7 +2269,7 @@ func ArrayOf(count int, elem Type) Type {
array.fieldAlign = typ.fieldAlign
array.uncommonType = nil
array.len = uintptr(count)
- array.slice = slice.(*rtype)
+ array.slice = SliceOf(elem).(*rtype)
array.kind &^= kindNoPointers
switch {
@@ -2413,7 +2383,8 @@ func ArrayOf(count int, elem Type) Type {
}
}
- return cachePut(ckey, &array.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+ return ti.(Type)
}
func appendVarint(x []byte, v uintptr) []byte {
@@ -2466,7 +2437,7 @@ func ifaceIndir(t *rtype) bool {
return t.kind&kindDirectIface == 0
}
-// Layout matches runtime.BitVector (well enough).
+// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
n uint32 // number of bits
data []byte
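
The dominant change in type.go is replacing the RWMutex-guarded maps behind ptrMap, lookupCache, funcLookupCache and structLookupCache with sync.Map: Load on the fast path, LoadOrStore to publish a newly built entry so racing goroutines converge on a single value. A minimal sketch of that caching pattern under assumed names (cache, entry and buildAndCache are hypothetical, not part of the patch):

package typecache

import "sync"

var cache sync.Map // map[string]*entry

type entry struct{ value int }

func buildAndCache(key string, build func() *entry) *entry {
	// Fast path: no locking, safe for concurrent readers.
	if e, ok := cache.Load(key); ok {
		return e.(*entry)
	}
	// Slow path: build outside any lock; if another goroutine stored an
	// entry first, LoadOrStore returns that one and ours is dropped.
	e, _ := cache.LoadOrStore(key, build())
	return e.(*entry)
}
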
diff --git a/libgo/go/reflect/value.go b/libgo/go/reflect/value.go
index 8f6a93b..792699a 100644
--- a/libgo/go/reflect/value.go
+++ b/libgo/go/reflect/value.go
@@ -30,9 +30,9 @@ const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ide
// the underlying Go value can be used concurrently for the equivalent
// direct operations.
//
-// Using == on two Values does not compare the underlying values
-// they represent, but rather the contents of the Value structs.
// To compare two Values, compare the results of the Interface method.
+// Using == on two Values does not compare the underlying values
+// they represent.
type Value struct {
// typ holds the type of the value represented by a Value.
typ *rtype
@@ -1000,7 +1000,7 @@ func (v Value) Method(i int) Value {
return Value{v.typ, v.ptr, fl}
}
-// NumMethod returns the number of methods in the value's method set.
+// NumMethod returns the number of exported methods in the value's method set.
func (v Value) NumMethod() int {
if v.typ == nil {
panic(&ValueError{"reflect.Value.NumMethod", Invalid})
@@ -1933,12 +1933,18 @@ func MakeChan(typ Type, buffer int) Value {
return Value{typ.common(), unsafe.Pointer(&ch), flag(Chan) | flagIndir}
}
-// MakeMap creates a new map of the specified type.
+// MakeMap creates a new map with the specified type.
func MakeMap(typ Type) Value {
+ return MakeMapWithSize(typ, 0)
+}
+
+// MakeMapWithSize creates a new map with the specified type
+// and initial space for approximately n elements.
+func MakeMapWithSize(typ Type, n int) Value {
if typ.Kind() != Map {
- panic("reflect.MakeMap of non-map type")
+ panic("reflect.MakeMapWithSize of non-map type")
}
- m := makemap(typ.(*rtype))
+ m := makemap(typ.(*rtype), n)
return Value{typ.common(), unsafe.Pointer(&m), flag(Map) | flagIndir}
}
@@ -2015,7 +2021,6 @@ func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value
case directlyAssignable(dst, v.typ):
// Overwrite type so that they match.
// Same memory layout, so no harm done.
- v.typ = dst
fl := v.flag & (flagRO | flagAddr | flagIndir)
fl |= flag(dst.Kind())
return Value{dst, v.ptr, fl}
@@ -2333,7 +2338,7 @@ func chanrecv(ch unsafe.Pointer, nb bool, val unsafe.Pointer) (selected, receive
func chansend(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool
func makechan(typ *rtype, size uint64) (ch unsafe.Pointer)
-func makemap(t *rtype) (m unsafe.Pointer)
+func makemap(t *rtype, cap int) (m unsafe.Pointer)
//go:noescape
func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
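
value.go adds MakeMapWithSize, the reflect counterpart of make(map[K]V, n): the hint is passed through to the runtime's makemap so the map starts with room for roughly n elements, which is what the new TestMapAlloc case relies on to stay within its allocation budget. A minimal usage sketch:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Pre-size for ~1000 entries, then fill half of it; with the hint the
	// loop should rarely need to grow the map.
	mv := reflect.MakeMapWithSize(reflect.TypeOf(map[int]int{}), 1000)
	for i := 0; i < 500; i++ {
		v := reflect.ValueOf(i)
		mv.SetMapIndex(v, v)
	}
	fmt.Println(mv.Len()) // 500
}
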