Diffstat (limited to 'libgo/go')
-rw-r--r--  libgo/go/internal/reflectlite/type.go    40
-rw-r--r--  libgo/go/reflect/type.go                141
-rw-r--r--  libgo/go/reflect/value.go                  3
-rw-r--r--  libgo/go/runtime/alg.go                   89
-rw-r--r--  libgo/go/runtime/map.go                   63
-rw-r--r--  libgo/go/runtime/map_benchmark_test.go    30
-rw-r--r--  libgo/go/runtime/map_fast32.go            18
-rw-r--r--  libgo/go/runtime/map_fast64.go            18
-rw-r--r--  libgo/go/runtime/map_faststr.go           14
-rw-r--r--  libgo/go/runtime/map_test.go              61
-rw-r--r--  libgo/go/runtime/type.go                  38
11 files changed, 306 insertions, 209 deletions
diff --git a/libgo/go/internal/reflectlite/type.go b/libgo/go/internal/reflectlite/type.go
index 02b2aee..35cf1a4 100644
--- a/libgo/go/internal/reflectlite/type.go
+++ b/libgo/go/internal/reflectlite/type.go
@@ -110,33 +110,14 @@ const (
 // available in the memory directly following the rtype value.
 //
 // tflag values must be kept in sync with copies in:
-//	cmd/compile/internal/gc/reflect.go
-//	cmd/link/internal/ld/decodesym.go
+//	go/types.cc
 //	runtime/type.go
 type tflag uint8
 
 const (
-	// tflagUncommon means that there is a pointer, *uncommonType,
-	// just beyond the outer type structure.
-	//
-	// For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
-	// then t has uncommonType data and it can be accessed as:
-	//
-	//	type tUncommon struct {
-	//		structType
-	//		u uncommonType
-	//	}
-	//	u := &(*tUncommon)(unsafe.Pointer(t)).u
-	tflagUncommon tflag = 1 << 0
-
-	// tflagExtraStar means the name in the str field has an
-	// extraneous '*' prefix. This is because for most types T in
-	// a program, the type *T also exists and reusing the str data
-	// saves binary size.
-	tflagExtraStar tflag = 1 << 1
-
-	// tflagNamed means the type has a name.
-	tflagNamed tflag = 1 << 2
+	// tflagRegularMemory means that equal and hash functions can treat
+	// this type as a single region of t.size bytes.
+	tflagRegularMemory tflag = 1 << 3
 )
 
 // rtype is the common implementation of most values.
@@ -147,16 +128,15 @@ type rtype struct {
 	size       uintptr
 	ptrdata    uintptr // number of bytes in the type that can contain pointers
 	hash       uint32  // hash of type; avoids computation in hash tables
-	kind       uint8   // enumeration for C
+	tflag      tflag   // extra type information flags
 	align      uint8   // alignment of variable with this type
 	fieldAlign uint8   // alignment of struct field with this type
-	_          uint8   // unused/padding
-
-	hashfn  func(unsafe.Pointer, uintptr) uintptr     // hash function
-	equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
-
+	kind       uint8   // enumeration for C
+	// function for comparing objects of this type
+	// (ptr to object A, ptr to object B) -> ==?
+	equal func(unsafe.Pointer, unsafe.Pointer) bool
 	gcdata  *byte   // garbage collection data
-	string  *string // string form; unnecessary but undeniably useful
+	string *string // string form; unnecessary but undeniably useful
 	*uncommonType          // (relatively) uncommon fields
 	ptrToThis *rtype // type for pointer to this type, may be zero
 }
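This first file shows the shape of the whole patch: the paired hashfn/equalfn pointers stored in every gccgo type descriptor give way to a single equal pointer plus a tflag bit, while hashing moves into a generic runtime typehash (and, for maps, a per-maptype hasher, below). A minimal sketch of the resulting contract, using toy stand-in types rather than the real descriptor:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    const tflagRegularMemory = 1 << 3

    // rtype is a toy stand-in for the descriptor, not the runtime's.
    type rtype struct {
    	size  uintptr
    	tflag uint8
    	equal func(p, q unsafe.Pointer) bool // nil means "not comparable"
    }

    // isComparable mirrors what reflect's Comparable becomes: a nil check.
    func isComparable(t *rtype) bool { return t.equal != nil }

    // hashPath sketches the dispatch typehash performs: flat types are
    // hashed as one region of t.size bytes, everything else recurses.
    func hashPath(t *rtype) string {
    	if t.tflag&tflagRegularMemory != 0 {
    		return "memhash(p, seed, t.size)"
    	}
    	return "typehash: per-kind recursion"
    }

    func main() {
    	u32 := &rtype{size: 4, tflag: tflagRegularMemory,
    		equal: func(p, q unsafe.Pointer) bool { return *(*uint32)(p) == *(*uint32)(q) }}
    	slice := &rtype{size: 24} // slices never get an equal function
    	fmt.Println(isComparable(u32), hashPath(u32))
    	fmt.Println(isComparable(slice), hashPath(slice))
    }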
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index f82f5eb..41ed383 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -261,6 +261,20 @@ const (
 	UnsafePointer
 )
 
+// tflag is used by an rtype to signal what extra type information is
+// available in the memory directly following the rtype value.
+//
+// tflag values must be kept in sync with copies in:
+//	go/types.cc
+//	runtime/type.go
+type tflag uint8
+
+const (
+	// tflagRegularMemory means that equal and hash functions can treat
+	// this type as a single region of t.size bytes.
+	tflagRegularMemory tflag = 1 << 3
+)
+
 // rtype is the common implementation of most values.
 // It is embedded in other struct types.
 //
@@ -269,16 +283,15 @@ type rtype struct {
 	size       uintptr
 	ptrdata    uintptr // size of memory prefix holding all pointers
 	hash       uint32  // hash of type; avoids computation in hash tables
-	kind       uint8   // enumeration for C
-	align      int8    // alignment of variable with this type
+	tflag      tflag   // extra type information flags
+	align      uint8   // alignment of variable with this type
 	fieldAlign uint8   // alignment of struct field with this type
-	_          uint8   // unused/padding
-
-	hashfn  func(unsafe.Pointer, uintptr) uintptr     // hash function
-	equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
-
+	kind       uint8   // enumeration for C
+	// function for comparing objects of this type
+	// (ptr to object A, ptr to object B) -> ==?
+	equal func(unsafe.Pointer, unsafe.Pointer) bool
 	gcdata  *byte   // garbage collection data
-	string  *string // string form; unnecessary but undeniably useful
+	string *string // string form; unnecessary but undeniably useful
 	*uncommonType          // (relatively) uncommon fields
 	ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
 }
@@ -350,9 +363,11 @@ type interfaceType struct {
 // mapType represents a map type.
 type mapType struct {
 	rtype
-	key    *rtype // map key type
-	elem   *rtype // map element (value) type
-	bucket *rtype // internal bucket structure
+	key    *rtype // map key type
+	elem   *rtype // map element (value) type
+	bucket *rtype // internal bucket structure
+	// function for hashing keys (ptr to key, seed) -> hash
+	hasher     func(unsafe.Pointer, uintptr) uintptr
 	keysize    uint8  // size of key slot
 	valuesize  uint8  // size of value slot
 	bucketsize uint16 // size of bucket
@@ -1178,31 +1193,7 @@ func (t *rtype) ConvertibleTo(u Type) bool {
 }
 
 func (t *rtype) Comparable() bool {
-	switch t.Kind() {
-	case Bool, Int, Int8, Int16, Int32, Int64,
-		Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
-		Float32, Float64, Complex64, Complex128,
-		Chan, Interface, Ptr, String, UnsafePointer:
-		return true
-
-	case Func, Map, Slice:
-		return false
-
-	case Array:
-		return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()
-
-	case Struct:
-		tt := (*structType)(unsafe.Pointer(t))
-		for i := range tt.fields {
-			if !tt.fields[i].typ.Comparable() {
-				return false
-			}
-		}
-		return true
-
-	default:
-		panic("reflect: impossible")
-	}
+	return t.equal != nil
 }
 
 // implements reports whether the type V implements the interface type T.
@@ -1457,6 +1448,7 @@ func ChanOf(dir ChanDir, t Type) Type {
 	var ichan interface{} = (chan unsafe.Pointer)(nil)
 	prototype := *(**chanType)(unsafe.Pointer(&ichan))
 	ch := *prototype
+	ch.tflag = tflagRegularMemory
 	ch.dir = uintptr(dir)
 	ch.string = &s
 
@@ -1481,8 +1473,6 @@ func ChanOf(dir ChanDir, t Type) Type {
 	return ti.(Type)
 }
 
-func ismapkey(*rtype) bool // implemented in runtime
-
 // MapOf returns the map type with the given key and element types.
 // For example, if k represents int and e represents string,
 // MapOf(k, e) represents map[int]string.
@@ -1493,7 +1483,7 @@ func MapOf(key, elem Type) Type {
 	ktyp := key.(*rtype)
 	etyp := elem.(*rtype)
 
-	if !ismapkey(ktyp) {
+	if ktyp.equal == nil {
 		panic("reflect.MapOf: invalid key type " + ktyp.String())
 	}
 
@@ -1530,6 +1520,9 @@ func MapOf(key, elem Type) Type {
 	mt.ptrToThis = nil
 
 	mt.bucket = bucketOf(ktyp, etyp)
+	mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+		return typehash(ktyp, p, seed)
+	}
 	mt.flags = 0
 	if ktyp.size > maxKeySize {
 		mt.keysize = uint8(ptrSize)
@@ -1851,7 +1844,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
 	}
 
 	b := &rtype{
-		align:      int8(maxAlign),
+		align:      uint8(maxAlign),
 		fieldAlign: uint8(maxAlign),
 		size:       size,
 		kind:       uint8(Struct),
@@ -1949,9 +1942,8 @@ func StructOf(fields []StructField) Type {
 	var (
 		hash       = uint32(12)
 		size       uintptr
-		typalign   int8
+		typalign   uint8
 		comparable = true
-		hashable   = true
 
 		fs   = make([]structField, len(fields))
 		repr = make([]byte, 0, 64)
@@ -2036,12 +2028,11 @@ func StructOf(fields []StructField) Type {
 			repr = append(repr, ';')
 		}
 
-		comparable = comparable && (ft.equalfn != nil)
-		hashable = hashable && (ft.hashfn != nil)
+		comparable = comparable && (ft.equal != nil)
 
 		offset := align(size, uintptr(ft.fieldAlign))
-		if int8(ft.fieldAlign) > typalign {
-			typalign = int8(ft.fieldAlign)
+		if ft.fieldAlign > typalign {
+			typalign = ft.fieldAlign
 		}
 		size = offset + ft.size
 		f.offsetEmbed |= offset << 1
@@ -2118,11 +2109,12 @@ func StructOf(fields []StructField) Type {
 	}
 
 	typ.string = &str
+	typ.tflag = 0 // TODO: set tflagRegularMemory
 	typ.hash = hash
 	typ.size = size
 	typ.ptrdata = typeptrdata(typ.common())
 	typ.align = typalign
-	typ.fieldAlign = uint8(typalign)
+	typ.fieldAlign = typalign
 
 	if hasGCProg {
 		lastPtrField := 0
@@ -2189,32 +2181,18 @@ func StructOf(fields []StructField) Type {
 	}
 	typ.ptrdata = typeptrdata(typ.common())
 
-	if hashable {
-		typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
-			o := seed
-			for _, ft := range typ.fields {
-				pi := add(p, ft.offset(), "&x.field safe")
-				o = ft.typ.hashfn(pi, o)
-			}
-			return o
-		}
-	} else {
-		typ.hashfn = nil
-	}
-
+	typ.equal = nil
 	if comparable {
-		typ.equalfn = func(p, q unsafe.Pointer) bool {
+		typ.equal = func(p, q unsafe.Pointer) bool {
 			for _, ft := range typ.fields {
 				pi := add(p, ft.offset(), "&x.field safe")
 				qi := add(q, ft.offset(), "&x.field safe")
-				if !ft.typ.equalfn(pi, qi) {
+				if !ft.typ.equal(pi, qi) {
 					return false
 				}
 			}
 			return true
 		}
-	} else {
-		typ.equalfn = nil
 	}
 
 	switch {
@@ -2322,6 +2300,7 @@ func ArrayOf(count int, elem Type) Type {
 	var iarray interface{} = [1]unsafe.Pointer{}
 	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
 	array := *prototype
+	array.tflag = typ.tflag & tflagRegularMemory
 	array.string = &s
 
 	// gccgo uses a different hash.
@@ -2427,21 +2406,12 @@ func ArrayOf(count int, elem Type) Type {
 		array.ptrdata = array.size // overestimate but ok; must match program
 	}
 
-	switch {
-	case count == 1 && !ifaceIndir(typ):
-		// array of 1 direct iface type can be direct
-		array.kind |= kindDirectIface
-	default:
-		array.kind &^= kindDirectIface
-	}
-
+	etyp := typ.common()
 	esize := typ.size
 
-	if typ.equalfn == nil {
-		array.equalfn = nil
-	} else {
-		eequal := typ.equalfn
-		array.equalfn = func(p, q unsafe.Pointer) bool {
+	array.equal = nil
+	if eequal := etyp.equal; eequal != nil {
+		array.equal = func(p, q unsafe.Pointer) bool {
 			for i := 0; i < count; i++ {
 				pi := arrayAt(p, i, esize, "i < count")
 				qi := arrayAt(q, i, esize, "i < count")
@@ -2453,17 +2423,12 @@ func ArrayOf(count int, elem Type) Type {
 		}
 	}
 
-	if typ.hashfn == nil {
-		array.hashfn = nil
-	} else {
-		ehash := typ.hashfn
-		array.hashfn = func(ptr unsafe.Pointer, seed uintptr) uintptr {
-			o := seed
-			for i := 0; i < count; i++ {
-				o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
-			}
-			return o
-		}
+	switch {
+	case count == 1 && !ifaceIndir(typ):
+		// array of 1 direct iface type can be direct
+		array.kind |= kindDirectIface
+	default:
+		array.kind &^= kindDirectIface
 	}
 
 	ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
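reflect/type.go carries the bulk of the API-visible work: Comparable collapses from a recursive kind switch to a nil check on equal, MapOf validates key types the same way instead of calling into the runtime's ismapkey, and StructOf/ArrayOf compose an equal closure from their element types rather than building both hash and equal closures. The observable behavior is unchanged, which an ordinary program using only the public API can confirm:

    package main

    import (
    	"fmt"
    	"reflect"
    )

    func main() {
    	// Comparable is now answered by "does the descriptor carry an equal
    	// function?", but callers observe the same results as before.
    	fmt.Println(reflect.TypeOf(0).Comparable())                    // true
    	fmt.Println(reflect.TypeOf([]int(nil)).Comparable())           // false: slices have no equal
    	fmt.Println(reflect.TypeOf([4]string{}).Comparable())          // true: comparable elements
    	fmt.Println(reflect.TypeOf(struct{ f func() }{}).Comparable()) // false: func field

    	// MapOf applies the same nil check to key types; an incomparable
    	// key still panics exactly as it did with ismapkey.
    	fmt.Println(reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0)))
    }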
diff --git a/libgo/go/reflect/value.go b/libgo/go/reflect/value.go
index c4a62c3..15a5024 100644
--- a/libgo/go/reflect/value.go
+++ b/libgo/go/reflect/value.go
@@ -2543,6 +2543,9 @@ func typedmemmove(t *rtype, dst, src unsafe.Pointer)
 //go:noescape
 func typedslicecopy(elemType *rtype, dst, src sliceHeader) int
 
+//go:noescape
+func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
+
 // Dummy annotation marking that the value x escapes,
 // for use in cases where the reflect code is so clever that
 // the compiler cannot follow.
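The body-less typehash declared here is supplied by the runtime; the //go:linkname directive in runtime/alg.go (next file) stitches the two packages together. The pairing, extracted from both sides of this patch:

    // reflect/value.go: declaration only, no body.
    //go:noescape
    func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr

    // runtime/alg.go: the implementation, exported under reflect's name.
    //go:linkname reflect_typehash reflect.typehash
    func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
    	return typehash(t, p, h)
    }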
diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go
index f96a75d..e802fdd 100644
--- a/libgo/go/runtime/alg.go
+++ b/libgo/go/runtime/alg.go
@@ -69,6 +69,9 @@ func memhash128(p unsafe.Pointer, h uintptr) uintptr {
 	return memhash(p, h, 16)
 }
 
+// runtime variable to check if the processor we're running on
+// actually supports the instructions used by the AES-based
+// hash implementation.
 var useAeshash bool
 
 // in C code
@@ -134,14 +137,17 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
 		return h
 	}
 	t := *(**_type)(tab)
-	fn := t.hashfn
-	if fn == nil {
+	if t.equal == nil {
+		// Check hashability here. We could do this check inside
+		// typehash, but we want to report the topmost type in
+		// the error text (e.g. in a struct with a field of slice type
+		// we want to report the struct, not the slice).
 		panic(errorString("hash of unhashable type " + t.string()))
 	}
 	if isDirectIface(t) {
-		return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+		return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
 	} else {
-		return c1 * fn(a.data, h^c0)
+		return c1 * typehash(t, a.data, h^c0)
 	}
 }
 
@@ -151,17 +157,74 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
 	if t == nil {
 		return h
 	}
-	fn := t.hashfn
-	if fn == nil {
+	if t.equal == nil {
+		// See comment in interhash above.
 		panic(errorString("hash of unhashable type " + t.string()))
 	}
 	if isDirectIface(t) {
-		return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+		return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
 	} else {
-		return c1 * fn(a.data, h^c0)
+		return c1 * typehash(t, a.data, h^c0)
+	}
+}
+
+// typehash computes the hash of the object of type t at address p.
+// h is the seed.
+// This function is seldom used. Most maps use for hashing either
+// fixed functions (e.g. f32hash) or compiler-generated functions
+// (e.g. for a type like struct { x, y string }). This implementation
+// is slower but more general and is used for hashing interface types
+// (called from interhash or nilinterhash, above) or for hashing in
+// maps generated by reflect.MapOf (reflect_typehash, below).
+func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+	if t.tflag&tflagRegularMemory != 0 {
+		return memhash(p, h, t.size)
+	}
+	switch t.kind & kindMask {
+	case kindFloat32:
+		return f32hash(p, h)
+	case kindFloat64:
+		return f64hash(p, h)
+	case kindComplex64:
+		return c64hash(p, h)
+	case kindComplex128:
+		return c128hash(p, h)
+	case kindString:
+		return strhash(p, h)
+	case kindInterface:
+		i := (*interfacetype)(unsafe.Pointer(t))
+		if len(i.methods) == 0 {
+			return nilinterhash(p, h)
+		}
+		return interhash(p, h)
+	case kindArray:
+		a := (*arraytype)(unsafe.Pointer(t))
+		for i := uintptr(0); i < a.len; i++ {
+			h = typehash(a.elem, add(p, i*a.elem.size), h)
+		}
+		return h
+	case kindStruct:
+		s := (*structtype)(unsafe.Pointer(t))
+		for _, f := range s.fields {
+			// TODO: maybe we could hash several contiguous fields all at once.
+			if f.name != nil && *f.name == "_" {
+				continue
+			}
+			h = typehash(f.typ, add(p, f.offset()), h)
+		}
+		return h
+	default:
+		// Should never happen, as typehash should only be called
+		// with comparable types.
+		panic(errorString("hash of unhashable type " + t.string()))
 	}
 }
 
+//go:linkname reflect_typehash reflect.typehash
+func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+	return typehash(t, p, h)
+}
+
 func memequal0(p, q unsafe.Pointer) bool {
 	return true
 }
@@ -209,7 +272,7 @@ func efaceeq(x, y eface) bool {
 	if t == nil {
 		return true
 	}
-	eq := t.equalfn
+	eq := t.equal
 	if eq == nil {
 		panic(errorString("comparing uncomparable type " + t.string()))
 	}
@@ -230,7 +293,7 @@ func ifaceeq(x, y iface) bool {
 	if t != *(**_type)(y.tab) {
 		return false
 	}
-	eq := t.equalfn
+	eq := t.equal
 	if eq == nil {
 		panic(errorString("comparing uncomparable type " + t.string()))
 	}
@@ -251,7 +314,7 @@ func ifacevaleq(x iface, t *_type, p unsafe.Pointer) bool {
 	if xt != t {
 		return false
 	}
-	eq := t.equalfn
+	eq := t.equal
 	if eq == nil {
 		panic(errorString("comparing uncomparable type " + t.string()))
 	}
@@ -272,7 +335,7 @@ func ifaceefaceeq(x iface, y eface) bool {
 	if xt != y._type {
 		return false
 	}
-	eq := xt.equalfn
+	eq := xt.equal
 	if eq == nil {
 		panic(errorString("comparing uncomparable type " + xt.string()))
 	}
@@ -289,7 +352,7 @@ func efacevaleq(x eface, t *_type, p unsafe.Pointer) bool {
 	if x._type != t {
 		return false
 	}
-	eq := t.equalfn
+	eq := t.equal
 	if eq == nil {
 		panic(errorString("comparing uncomparable type " + t.string()))
 	}
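typehash keeps dedicated cases for exactly the kinds whose equality is not bitwise, so they can never take the tflagRegularMemory shortcut. A small self-contained demonstration of why, for floats:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	// +0 and -0 compare equal, so they must hash equal, yet their bit
    	// patterns differ; NaN never equals itself. This is why float and
    	// complex kinds keep f32hash/f64hash/c64hash/c128hash instead of
    	// being hashed as raw memory.
    	negZero := math.Copysign(0, -1)
    	m := map[float64]string{0: "zero"}
    	fmt.Println(m[negZero]) // "zero": -0 must find the +0 entry

    	nan := math.NaN()
    	m[nan] = "nan"
    	fmt.Println(m[nan]) // "": a NaN key can never be found again
    }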
diff --git a/libgo/go/runtime/map.go b/libgo/go/runtime/map.go
index 349577b..3672908 100644
--- a/libgo/go/runtime/map.go
+++ b/libgo/go/runtime/map.go
@@ -421,18 +421,16 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 	if msanenabled && h != nil {
 		msanread(key, t.key.size)
 	}
-	hashfn := t.key.hashfn
-	equalfn := t.key.equalfn
 	if h == nil || h.count == 0 {
 		if t.hashMightPanic() {
-			hashfn(key, 0) // see issue 23734
+			t.hasher(key, 0) // see issue 23734
 		}
 		return unsafe.Pointer(&zeroVal[0])
 	}
 	if h.flags&hashWriting != 0 {
 		throw("concurrent map read and map write")
 	}
-	hash := hashfn(key, uintptr(h.hash0))
+	hash := t.hasher(key, uintptr(h.hash0))
 	m := bucketMask(h.B)
 	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
@@ -459,7 +457,7 @@ bucketloop:
 			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
-			if equalfn(key, k) {
+			if t.key.equal(key, k) {
 				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
 				if t.indirectelem() {
 					e = *((*unsafe.Pointer)(e))
@@ -486,18 +484,16 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
 	if msanenabled && h != nil {
 		msanread(key, t.key.size)
 	}
-	hashfn := t.key.hashfn
-	equalfn := t.key.equalfn
 	if h == nil || h.count == 0 {
 		if t.hashMightPanic() {
-			hashfn(key, 0) // see issue 23734
+			t.hasher(key, 0) // see issue 23734
 		}
 		return unsafe.Pointer(&zeroVal[0]), false
 	}
 	if h.flags&hashWriting != 0 {
 		throw("concurrent map read and map write")
 	}
-	hash := hashfn(key, uintptr(h.hash0))
+	hash := t.hasher(key, uintptr(h.hash0))
 	m := bucketMask(h.B)
 	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
@@ -524,7 +520,7 @@ bucketloop:
 			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
-			if equalfn(key, k) {
+			if t.key.equal(key, k) {
 				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
 				if t.indirectelem() {
 					e = *((*unsafe.Pointer)(e))
@@ -546,9 +542,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
 	if h == nil || h.count == 0 {
 		return nil, nil
 	}
-	hashfn := t.key.hashfn
-	equalfn := t.key.equalfn
-	hash := hashfn(key, uintptr(h.hash0))
+	hash := t.hasher(key, uintptr(h.hash0))
 	m := bucketMask(h.B)
 	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
@@ -575,7 +569,7 @@ bucketloop:
 			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
-			if equalfn(key, k) {
+			if t.key.equal(key, k) {
 				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
 				if t.indirectelem() {
 					e = *((*unsafe.Pointer)(e))
@@ -625,11 +619,9 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 	if h.flags&hashWriting != 0 {
 		throw("concurrent map writes")
 	}
-	hashfn := t.key.hashfn
-	equalfn := t.key.equalfn
-	hash := hashfn(key, uintptr(h.hash0))
+	hash := t.hasher(key, uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash, since alg.hash may panic,
+	// Set hashWriting after calling t.hasher, since t.hasher may panic,
 	// in which case we have not actually done a write.
 	h.flags ^= hashWriting
 
@@ -666,7 +658,7 @@ bucketloop:
 			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
-			if !equalfn(key, k) {
+			if !t.key.equal(key, k) {
 				continue
 			}
 			// already have a mapping for key. Update it.
@@ -735,11 +727,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 	if msanenabled && h != nil {
 		msanread(key, t.key.size)
 	}
-	hashfn := t.key.hashfn
-	equalfn := t.key.equalfn
 	if h == nil || h.count == 0 {
 		if t.hashMightPanic() {
-			hashfn(key, 0) // see issue 23734
+			t.hasher(key, 0) // see issue 23734
 		}
 		return
 	}
@@ -747,9 +737,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 		throw("concurrent map writes")
 	}
 
-	hash := hashfn(key, uintptr(h.hash0))
+	hash := t.hasher(key, uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash, since alg.hash may panic,
+	// Set hashWriting after calling t.hasher, since t.hasher may panic,
 	// in which case we have not actually done a write (delete).
 	h.flags ^= hashWriting
 
@@ -774,7 +764,7 @@ search:
 			if t.indirectkey() {
 				k2 = *((*unsafe.Pointer)(k2))
 			}
-			if !equalfn(key, k2) {
+			if !t.key.equal(key, k2) {
 				continue
 			}
 			// Only clear key if there are pointers in it.
@@ -925,8 +915,6 @@ func mapiternext(it *hiter) {
 	b := it.bptr
 	i := it.i
 	checkBucket := it.checkBucket
-	hashfn := t.key.hashfn
-	equalfn := t.key.equalfn
 
 next:
 	if b == nil {
@@ -980,10 +968,10 @@ next:
 			// through the oldbucket, skipping any keys that will go
 			// to the other new bucket (each oldbucket expands to two
 			// buckets during a grow).
-			if t.reflexivekey() || equalfn(k, k) {
+			if t.reflexivekey() || t.key.equal(k, k) {
 				// If the item in the oldbucket is not destined for
 				// the current new bucket in the iteration, skip it.
-				hash := hashfn(k, uintptr(h.hash0))
+				hash := t.hasher(k, uintptr(h.hash0))
 				if hash&bucketMask(it.B) != checkBucket {
 					continue
 				}
@@ -1001,7 +989,7 @@ next:
 			}
 		}
 		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
-			!(t.reflexivekey() || equalfn(k, k)) {
+			!(t.reflexivekey() || t.key.equal(k, k)) {
 			// This is the golden data, we can return it.
 			// OR
 			// key!=key, so the entry can't be deleted or updated, so we can just return it.
@@ -1238,8 +1226,8 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 			if !h.sameSizeGrow() {
 				// Compute hash to make our evacuation decision (whether we need
 				// to send this key/elem to bucket x or bucket y).
-				hash := t.key.hashfn(k2, uintptr(h.hash0))
-				if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equalfn(k2, k2) {
+				hash := t.hasher(k2, uintptr(h.hash0))
+				if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
 					// If key != key (NaNs), then the hash could be (and probably
 					// will be) entirely different from the old hash. Moreover,
 					// it isn't reproducible. Reproducibility is required in the
@@ -1333,16 +1321,12 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
 	}
 }
 
-func ismapkey(t *_type) bool {
-	return t.hashfn != nil
-}
-
 // Reflect stubs. Called from ../reflect/asm_*.s
 
 //go:linkname reflect_makemap reflect.makemap
 func reflect_makemap(t *maptype, cap int) *hmap {
 	// Check invariants and reflects math.
-	if !ismapkey(t.key) {
+	if t.key.equal == nil {
 		throw("runtime.reflect_makemap: unsupported map key type")
 	}
 	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
@@ -1445,10 +1429,5 @@ func reflectlite_maplen(h *hmap) int {
 	return h.count
 }
 
-//go:linkname reflect_ismapkey reflect.ismapkey
-func reflect_ismapkey(t *_type) bool {
-	return ismapkey(t)
-}
-
 const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go
 var zeroVal [maxZero]byte
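Throughout map.go the cached hashfn/equalfn locals disappear in favor of calling t.hasher and t.key.equal directly, and ismapkey (plus its reflect linkname stub) is gone entirely. Note the preserved subtlety in mapaccess/mapdelete: even on an empty map the hasher is still invoked when hashing might panic, because a lookup with an unhashable dynamic key type must fail loudly (issue 23734). Observable from ordinary code:

    package main

    import "fmt"

    func main() {
    	// A lookup with an unhashable dynamic key type must panic even
    	// when the map is empty; that is why the code above still calls
    	// t.hasher under hashMightPanic.
    	defer func() { fmt.Println("recovered:", recover()) }()
    	m := map[interface{}]bool{}
    	_ = m[[]int{1}] // panics: hash of unhashable type []int
    }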
diff --git a/libgo/go/runtime/map_benchmark_test.go b/libgo/go/runtime/map_benchmark_test.go
index d37dadc..cf04ead 100644
--- a/libgo/go/runtime/map_benchmark_test.go
+++ b/libgo/go/runtime/map_benchmark_test.go
@@ -483,3 +483,33 @@ func BenchmarkMapStringConversion(b *testing.B) {
 		})
 	}
 }
+
+var BoolSink bool
+
+func BenchmarkMapInterfaceString(b *testing.B) {
+	m := map[interface{}]bool{}
+
+	for i := 0; i < 100; i++ {
+		m[fmt.Sprintf("%d", i)] = true
+	}
+
+	key := (interface{})("A")
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		BoolSink = m[key]
+	}
+}
+func BenchmarkMapInterfacePtr(b *testing.B) {
+	m := map[interface{}]bool{}
+
+	for i := 0; i < 100; i++ {
+		i := i
+		m[&i] = true
+	}
+
+	key := new(int)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		BoolSink = m[key]
+	}
+}
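The two new benchmarks exercise exactly the path this patch reroutes: interface-keyed map lookups go through interhash, which now dispatches to typehash instead of an indirect hashfn. BoolSink is a package-level sink so the compiler cannot optimize the lookup away. They can be run with the usual go test invocation, e.g. `go test -run=NONE -bench=BenchmarkMapInterface runtime` (the exact command differs under a gccgo/libgo build).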
diff --git a/libgo/go/runtime/map_fast32.go b/libgo/go/runtime/map_fast32.go
index 57b3c0f..fdc7f0e 100644
--- a/libgo/go/runtime/map_fast32.go
+++ b/libgo/go/runtime/map_fast32.go
@@ -33,7 +33,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
 		// One-bucket table. No need to hash.
 		b = (*bmap)(h.buckets)
 	} else {
-		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 		m := bucketMask(h.B)
 		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 		if c := h.oldbuckets; c != nil {
@@ -73,7 +73,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
 		// One-bucket table. No need to hash.
 		b = (*bmap)(h.buckets)
 	} else {
-		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 		m := bucketMask(h.B)
 		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 		if c := h.oldbuckets; c != nil {
@@ -108,9 +108,9 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
 	if h.flags&hashWriting != 0 {
 		throw("concurrent map writes")
 	}
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	// Set hashWriting after calling t.hasher for consistency with mapassign.
 	h.flags ^= hashWriting
 
 	if h.buckets == nil {
@@ -198,9 +198,9 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 	if h.flags&hashWriting != 0 {
 		throw("concurrent map writes")
 	}
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	// Set hashWriting after calling t.hasher for consistency with mapassign.
 	h.flags ^= hashWriting
 
 	if h.buckets == nil {
@@ -289,9 +289,9 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
 		throw("concurrent map writes")
 	}
 
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash for consistency with mapdelete
+	// Set hashWriting after calling t.hasher for consistency with mapdelete
 	h.flags ^= hashWriting
 
 	bucket := hash & bucketMask(h.B)
@@ -408,7 +408,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 			if !h.sameSizeGrow() {
 				// Compute hash to make our evacuation decision (whether we need
 				// to send this key/elem to bucket x or bucket y).
-				hash := t.key.hashfn(k, uintptr(h.hash0))
+				hash := t.hasher(k, uintptr(h.hash0))
 				if hash&newbit != 0 {
 					useY = 1
 				}
diff --git a/libgo/go/runtime/map_fast64.go b/libgo/go/runtime/map_fast64.go
index af86f74..26c60ae 100644
--- a/libgo/go/runtime/map_fast64.go
+++ b/libgo/go/runtime/map_fast64.go
@@ -33,7 +33,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 		// One-bucket table. No need to hash.
 		b = (*bmap)(h.buckets)
 	} else {
-		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 		m := bucketMask(h.B)
 		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 		if c := h.oldbuckets; c != nil {
@@ -73,7 +73,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
 		// One-bucket table. No need to hash.
 		b = (*bmap)(h.buckets)
 	} else {
-		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 		m := bucketMask(h.B)
 		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 		if c := h.oldbuckets; c != nil {
@@ -108,9 +108,9 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 	if h.flags&hashWriting != 0 {
 		throw("concurrent map writes")
 	}
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	// Set hashWriting after calling t.hasher for consistency with mapassign.
 	h.flags ^= hashWriting
 
 	if h.buckets == nil {
@@ -198,9 +198,9 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 	if h.flags&hashWriting != 0 {
 		throw("concurrent map writes")
 	}
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	// Set hashWriting after calling t.hasher for consistency with mapassign.
 	h.flags ^= hashWriting
 
 	if h.buckets == nil {
@@ -289,9 +289,9 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
 		throw("concurrent map writes")
 	}
 
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash for consistency with mapdelete
+	// Set hashWriting after calling t.hasher for consistency with mapdelete
 	h.flags ^= hashWriting
 
 	bucket := hash & bucketMask(h.B)
@@ -408,7 +408,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 			if !h.sameSizeGrow() {
 				// Compute hash to make our evacuation decision (whether we need
 				// to send this key/elem to bucket x or bucket y).
-				hash := t.key.hashfn(k, uintptr(h.hash0))
+				hash := t.hasher(k, uintptr(h.hash0))
 				if hash&newbit != 0 {
 					useY = 1
 				}
diff --git a/libgo/go/runtime/map_faststr.go b/libgo/go/runtime/map_faststr.go
index 3c5175d..1775214 100644
--- a/libgo/go/runtime/map_faststr.go
+++ b/libgo/go/runtime/map_faststr.go
@@ -83,7 +83,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
 		return unsafe.Pointer(&zeroVal[0])
 	}
 dohash:
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
 	m := bucketMask(h.B)
 	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
@@ -178,7 +178,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
 		return unsafe.Pointer(&zeroVal[0]), false
 	}
 dohash:
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
 	m := bucketMask(h.B)
 	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
 	if c := h.oldbuckets; c != nil {
@@ -218,9 +218,9 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
 		throw("concurrent map writes")
 	}
 	key := stringStructOf(&s)
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	// Set hashWriting after calling t.hasher for consistency with mapassign.
 	h.flags ^= hashWriting
 
 	if h.buckets == nil {
@@ -314,9 +314,9 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
 	}
 
 	key := stringStructOf(&ky)
-	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
 
-	// Set hashWriting after calling alg.hash for consistency with mapdelete
+	// Set hashWriting after calling t.hasher for consistency with mapdelete
 	h.flags ^= hashWriting
 
 	bucket := hash & bucketMask(h.B)
@@ -436,7 +436,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 			if !h.sameSizeGrow() {
 				// Compute hash to make our evacuation decision (whether we need
 				// to send this key/elem to bucket x or bucket y).
-				hash := t.key.hashfn(k, uintptr(h.hash0))
+				hash := t.hasher(k, uintptr(h.hash0))
 				if hash&newbit != 0 {
 					useY = 1
 				}
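The map_fast32/map_fast64/map_faststr changes are mechanical: every t.key.hashfn call becomes t.hasher, and the comments tracking "alg.hash" (the old algorithm-table phrasing) are updated to name t.hasher instead. One incidental win: the hash function is now loaded straight off the maptype rather than through the key's type descriptor, one fewer pointer chase on each specialized map operation.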
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index bc5f738..b13d269 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -1172,3 +1172,64 @@ func TestMapTombstones(t *testing.T) {
 	}
 	runtime.MapTombstoneCheck(m)
 }
+
+type canString int
+
+func (c canString) String() string {
+	return fmt.Sprintf("%d", int(c))
+}
+
+func TestMapInterfaceKey(t *testing.T) {
+	// Test all the special cases in runtime.typehash.
+	type GrabBag struct {
+		f32  float32
+		f64  float64
+		c64  complex64
+		c128 complex128
+		s    string
+		i0   interface{}
+		i1   interface {
+			String() string
+		}
+		a [4]string
+	}
+
+	m := map[interface{}]bool{}
+	// Put a bunch of data in m, so that a bad hash is likely to
+	// lead to a bad bucket, which will lead to a missed lookup.
+	for i := 0; i < 1000; i++ {
+		m[i] = true
+	}
+	m[GrabBag{f32: 1.0}] = true
+	if !m[GrabBag{f32: 1.0}] {
+		panic("f32 not found")
+	}
+	m[GrabBag{f64: 1.0}] = true
+	if !m[GrabBag{f64: 1.0}] {
+		panic("f64 not found")
+	}
+	m[GrabBag{c64: 1.0i}] = true
+	if !m[GrabBag{c64: 1.0i}] {
+		panic("c64 not found")
+	}
+	m[GrabBag{c128: 1.0i}] = true
+	if !m[GrabBag{c128: 1.0i}] {
+		panic("c128 not found")
+	}
+	m[GrabBag{s: "foo"}] = true
+	if !m[GrabBag{s: "foo"}] {
+		panic("string not found")
+	}
+	m[GrabBag{i0: "foo"}] = true
+	if !m[GrabBag{i0: "foo"}] {
+		panic("interface{} not found")
+	}
+	m[GrabBag{i1: canString(5)}] = true
+	if !m[GrabBag{i1: canString(5)}] {
+		panic("interface{String() string} not found")
+	}
+	m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] = true
+	if !m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] {
+		panic("array not found")
+	}
+}
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
index 63ad310..94abbb8 100644
--- a/libgo/go/runtime/type.go
+++ b/libgo/go/runtime/type.go
@@ -12,18 +12,32 @@ import (
 	"unsafe"
 )
 
+// tflag is documented in reflect/type.go.
+//
+// tflag values must be kept in sync with copies in:
+//	go/types.cc
+//	reflect/type.go
+//	internal/reflectlite/type.go
+type tflag uint8
+
+const (
+	tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
+)
+
 type _type struct {
 	size    uintptr
 	ptrdata uintptr
 	hash    uint32
-	kind    uint8
-	align   int8
+	tflag      tflag
+	align      uint8
 	fieldAlign uint8
-	_ uint8
-
-	hashfn  func(unsafe.Pointer, uintptr) uintptr
-	equalfn func(unsafe.Pointer, unsafe.Pointer) bool
-
+	kind       uint8
+	// function for comparing objects of this type
+	// (ptr to object A, ptr to object B) -> ==?
+	equal func(unsafe.Pointer, unsafe.Pointer) bool
+	// gcdata stores the GC type data for the garbage collector.
+	// If the KindGCProg bit is set in kind, gcdata is a GC program.
+	// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
 	gcdata  *byte
 	_string *string
 	*uncommontype
@@ -74,10 +88,12 @@ type interfacetype struct {
 }
 
 type maptype struct {
-	typ    _type
-	key    *_type
-	elem   *_type
-	bucket *_type // internal type representing a hash bucket
+	typ    _type
+	key    *_type
+	elem   *_type
+	bucket *_type // internal type representing a hash bucket
+	// function for hashing keys (ptr to key, seed) -> hash
+	hasher     func(unsafe.Pointer, uintptr) uintptr
 	keysize    uint8  // size of key slot
 	elemsize   uint8  // size of elem slot
 	bucketsize uint16 // size of bucket
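runtime/type.go is the third copy of the descriptor layout; the compiler's go/types.cc is the one that actually emits these structures, which is why every copy now carries the sync comment. A toy mirror of the new field order, with the kind of offset check one might use to eyeball agreement (illustrative only; real offsets depend on the target, and the uncommontype embedding plus gccgo-specific fields are omitted):

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    // typeMirror loosely mirrors the new runtime._type field order.
    type typeMirror struct {
    	size       uintptr
    	ptrdata    uintptr
    	hash       uint32
    	tflag      uint8
    	align      uint8
    	fieldAlign uint8
    	kind       uint8
    	equal      func(unsafe.Pointer, unsafe.Pointer) bool
    	gcdata     *byte
    	str        *string
    }

    func main() {
    	var t typeMirror
    	// tflag/align/fieldAlign/kind pack into the same four bytes that
    	// previously held kind/align/fieldAlign/padding, while dropping
    	// hashfn shrinks every descriptor by one function pointer.
    	fmt.Println(unsafe.Offsetof(t.equal), unsafe.Sizeof(t))
    }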